From 12f9887c8edb38177041aa35fd7d948b5c64f15b Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 30 Sep 2019 13:14:44 -0700 Subject: [PATCH] bump containerd v1.3.0 full diff: https://github.com/containerd/containerd/compare/7c1e88399ec0b0b077121d9d5ad97e647b11c870...v1.3.0 Signed-off-by: Sebastiaan van Stijn Signed-off-by: Derek McGowan --- vendor.conf | 2 +- .../containerd/containerd/README.md | 2 +- .../api/services/diff/v1/diff.pb.go | 266 ++- .../api/services/diff/v1/diff.proto | 3 + .../introspection/v1/introspection.pb.go | 284 ++- .../introspection/v1/introspection.proto | 7 + .../api/services/leases/v1/leases.pb.go | 1618 ++++++++++++++--- .../api/services/leases/v1/leases.proto | 38 + .../archive/compression/compression.go | 2 +- .../containerd/containerd/archive/tar.go | 257 +-- .../containerd/containerd/archive/tar_opts.go | 38 +- .../containerd/archive/tar_opts_linux.go | 59 + .../containerd/archive/tar_opts_windows.go | 18 +- .../containerd/containerd/archive/tar_unix.go | 77 +- .../containerd/archive/tar_windows.go | 31 +- .../containerd/archive/time_unix.go | 2 +- .../containerd/containerd/cio/io.go | 29 +- .../containerd/containerd/cio/io_unix.go | 22 +- .../containerd/containerd/client.go | 157 +- .../containerd/containerd/client_opts.go | 26 +- .../containerd/containerd/container.go | 41 +- .../containerd/containerd/container_opts.go | 88 +- .../containerd/container_opts_unix.go | 18 +- .../containerd/container_restore_opts.go | 3 +- .../containerd/containers/containers.go | 2 +- .../containerd/containerd/content/helpers.go | 31 +- .../containerd/content/local/store.go | 16 +- .../containerd/contrib/nvidia/nvidia.go | 2 +- .../containerd/contrib/seccomp/seccomp.go | 2 - .../contrib/seccomp/seccomp_default.go | 8 +- .../seccomp/seccomp_default_unsupported.go} | 11 +- .../containerd/defaults/defaults.go | 4 +- .../containerd/defaults/defaults_unix.go | 2 + .../containerd/defaults/defaults_windows.go | 2 + .../github.com/containerd/containerd/diff.go | 13 +- .../containerd/containerd/diff/diff.go | 20 +- .../containerd/containerd/diff/stream.go | 187 ++ .../containerd/containerd/diff/stream_unix.go | 146 ++ .../containerd/diff/stream_windows.go | 165 ++ .../containerd/containerd/errdefs/errors.go | 17 +- .../containerd/containerd/errdefs/grpc.go | 9 + .../containerd/events/exchange/exchange.go | 2 +- .../containerd/containerd/export.go | 26 +- .../github.com/containerd/containerd/gc/gc.go | 7 + .../github.com/containerd/containerd/image.go | 183 +- .../annotations.go} | 13 +- .../containerd/images/archive/exporter.go | 468 +++++ .../containerd/images/archive/importer.go | 154 +- .../containerd/images/archive/reference.go | 30 +- .../containerd/containerd/images/handlers.go | 7 +- .../containerd/containerd/images/image.go | 60 +- .../containerd/images/mediatypes.go | 84 + .../containerd/images/oci/exporter.go | 241 --- .../containerd/containerd/import.go | 51 +- .../containerd/containerd/install.go | 3 +- .../containerd/containerd/leases/lease.go | 10 + .../containerd/leases/proxy/manager.go | 40 + .../containerd/containerd/log/context.go | 2 +- .../containerd/metadata/containers.go | 259 +-- .../containerd/containerd/metadata/content.go | 8 +- .../containerd/containerd/metadata/db.go | 33 +- .../containerd/containerd/metadata/gc.go | 39 +- .../containerd/containerd/metadata/images.go | 18 +- .../containerd/containerd/metadata/leases.go | 298 ++- .../containerd/metadata/namespaces.go | 10 +- .../containerd/metadata/snapshot.go | 43 +- 
.../containerd/containerd/namespaces.go | 16 +- .../containerd/namespaces/context.go | 14 +- .../containerd/containerd/namespaces/store.go | 11 +- .../containerd/containerd/namespaces/ttrpc.go | 51 + .../containerd/containerd/oci/spec.go | 3 +- .../containerd/containerd/oci/spec_opts.go | 116 +- .../containerd/oci/spec_opts_linux.go | 64 + .../containerd/oci/spec_opts_unix.go | 63 + .../containerd/oci/spec_opts_windows.go | 5 + .../containerd/pkg/dialer/dialer.go | 14 +- .../proc => pkg/process}/deleted_state.go | 5 +- .../v1/linux/proc => pkg/process}/exec.go | 26 +- .../linux/proc => pkg/process}/exec_state.go | 2 +- .../v1/linux/proc => pkg/process}/init.go | 40 +- .../linux/proc => pkg/process}/init_state.go | 42 +- .../v1/linux/proc => pkg/process}/io.go | 28 +- .../proc/proc.go => pkg/process/process.go} | 28 +- .../v1/linux/proc => pkg/process}/types.go | 2 +- .../v1/linux/proc => pkg/process}/utils.go | 57 +- .../pkg/stdio/platform.go} | 21 +- .../pkg/stdio/stdio.go} | 20 +- .../containerd/platforms/compare.go | 37 + .../containerd/platforms/cpuinfo.go | 2 +- .../containerd/platforms/database.go | 6 +- .../containerd/platforms/platforms.go | 2 +- .../containerd/containerd/plugin/context.go | 13 +- .../containerd/containerd/plugin/plugin.go | 49 +- .../containerd/containerd/process.go | 4 +- .../github.com/containerd/containerd/pull.go | 63 +- .../containerd/remotes/docker/authorizer.go | 299 ++- .../containerd/remotes/docker/fetcher.go | 133 +- .../containerd/remotes/docker/handler.go | 42 + .../containerd/remotes/docker/pusher.go | 189 +- .../containerd/remotes/docker/registry.go | 202 ++ .../containerd/remotes/docker/resolver.go | 484 +++-- .../remotes/docker/schema1/converter.go | 4 +- .../containerd/remotes/docker/scope.go | 51 +- .../containerd/containerd/remotes/handlers.go | 95 +- .../containerd/containerd/rootfs/apply.go | 26 +- .../containerd/containerd/rootfs/diff.go | 12 +- .../containerd/containerd/runtime/task.go | 3 + .../containerd/runtime/v1/linux/bundle.go | 7 + .../runtime/v1/linux/proc/process.go | 42 - .../containerd/runtime/v1/linux/runtime.go | 100 +- .../containerd/runtime/v1/linux/task.go | 13 +- .../runtime/v1/shim/client/client.go | 35 +- .../containerd/runtime/v1/shim/service.go | 93 +- .../containerd/runtime/v2/README.md | 2 +- .../services/server/config/config.go | 216 ++- .../containerd/snapshots/snapshotter.go | 13 +- .../reaper.go => sys/reaper/reaper_unix.go} | 123 +- .../containerd/containerd/task_opts_unix.go | 26 + .../containerd/containerd/unpacker.go | 243 +++ .../containerd/containerd/vendor.conf | 78 +- .../containerd/containerd/version/version.go | 2 +- .../containerd/continuity/context.go | 673 ------- .../containerd/continuity/digests.go | 104 -- .../containerd/continuity/groups_unix.go | 129 -- .../containerd/continuity/hardlinks.go | 73 - .../containerd/continuity/hardlinks_unix.go | 52 - .../containerd/continuity/ioutils.go | 63 - .../containerd/continuity/manifest.go | 160 -- .../continuity/proto/manifest.pb.go | 181 -- .../continuity/proto/manifest.proto | 97 - .../containerd/continuity/resource.go | 590 ------ .../containerd/continuity/resource_unix.go | 53 - 132 files changed, 7211 insertions(+), 4115 deletions(-) create mode 100644 vendor/github.com/containerd/containerd/archive/tar_opts_linux.go rename vendor/github.com/containerd/{continuity/proto/gen.go => containerd/contrib/seccomp/seccomp_default_unsupported.go} (69%) create mode 100644 vendor/github.com/containerd/containerd/diff/stream.go create mode 100644 
vendor/github.com/containerd/containerd/diff/stream_unix.go create mode 100644 vendor/github.com/containerd/containerd/diff/stream_windows.go rename vendor/github.com/containerd/containerd/{archive/tar_opts_unix.go => images/annotations.go} (73%) create mode 100644 vendor/github.com/containerd/containerd/images/archive/exporter.go delete mode 100644 vendor/github.com/containerd/containerd/images/oci/exporter.go create mode 100644 vendor/github.com/containerd/containerd/namespaces/ttrpc.go create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_linux.go create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_unix.go rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/deleted_state.go (95%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/exec.go (91%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/exec_state.go (99%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/init.go (95%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/init_state.go (92%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/io.go (89%) rename vendor/github.com/containerd/containerd/{runtime/proc/proc.go => pkg/process/process.go} (70%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/types.go (99%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/utils.go (68%) rename vendor/github.com/containerd/{continuity/hardlinks_windows.go => containerd/pkg/stdio/platform.go} (58%) rename vendor/github.com/containerd/{continuity/resource_windows.go => containerd/pkg/stdio/stdio.go} (68%) create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/registry.go delete mode 100644 vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go rename vendor/github.com/containerd/containerd/{runtime/v1/shim/reaper.go => sys/reaper/reaper_unix.go} (56%) create mode 100644 vendor/github.com/containerd/containerd/unpacker.go delete mode 100644 vendor/github.com/containerd/continuity/context.go delete mode 100644 vendor/github.com/containerd/continuity/digests.go delete mode 100644 vendor/github.com/containerd/continuity/groups_unix.go delete mode 100644 vendor/github.com/containerd/continuity/hardlinks.go delete mode 100644 vendor/github.com/containerd/continuity/hardlinks_unix.go delete mode 100644 vendor/github.com/containerd/continuity/ioutils.go delete mode 100644 vendor/github.com/containerd/continuity/manifest.go delete mode 100644 vendor/github.com/containerd/continuity/proto/manifest.pb.go delete mode 100644 vendor/github.com/containerd/continuity/proto/manifest.proto delete mode 100644 vendor/github.com/containerd/continuity/resource.go delete mode 100644 vendor/github.com/containerd/continuity/resource_unix.go diff --git a/vendor.conf b/vendor.conf index 356d88fabcfa9..aeb32387796a0 100644 --- a/vendor.conf +++ b/vendor.conf @@ -117,7 +117,7 @@ github.com/googleapis/gax-go 317e0006254c44a0ac427cc52a0e google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 # containerd -github.com/containerd/containerd 7c1e88399ec0b0b077121d9d5ad97e647b11c870 +github.com/containerd/containerd 36cf5b690dcc00ff0f34ff7799209050c3d0c59a # v1.3.0 github.com/containerd/fifo 
a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7 github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index 9b2ba3def8c98..2323f26f623d0 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -218,7 +218,7 @@ This will be the best place to discuss design and implementation. For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development. **Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com. -[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ) +[Click here for an invite to docker community slack.](https://dockr.ly/slack) ### Security audit diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go index 9ada87346d88d..6c7920004fc64 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go @@ -9,6 +9,7 @@ import ( types "github.com/containerd/containerd/api/types" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" io "io" math "math" @@ -29,11 +30,12 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type ApplyRequest struct { // Diff is the descriptor of the diff to be extracted - Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` - Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` + Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ApplyRequest) Reset() { *m = ApplyRequest{} } @@ -205,6 +207,7 @@ var xxx_messageInfo_DiffResponse proto.InternalMessageInfo func init() { proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest") + proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry") proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse") proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest") proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry") @@ -216,36 +219,40 @@ func init() { } var fileDescriptor_3b36a99e6faaa935 = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 
0x53, 0x4f, 0x6f, 0xd3, 0x30, - 0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40, - 0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a, - 0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47, - 0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef, - 0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea, - 0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1, - 0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63, - 0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35, - 0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa, - 0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab, - 0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6, - 0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f, - 0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb, - 0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b, - 0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d, - 0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2, - 0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb, - 0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd, - 0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b, - 0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f, - 0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb, - 0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77, - 0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b, - 0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac, - 0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f, - 0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1, - 0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00, + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29, + 0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20, + 0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e, + 0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 0x05, 0xc3, 0xc9, + 0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67, + 0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d, + 0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 
0x86, 0xae, + 0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca, + 0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a, + 0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc, + 0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2, + 0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63, + 0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0, + 0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf, + 0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35, + 0x4e, 0xe2, 0x66, 0x7f, 0xf8, 0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73, + 0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e, + 0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47, + 0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a, + 0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13, + 0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29, + 0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41, + 0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95, + 0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec, + 0x9e, 0x7a, 0xe0, 0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce, + 0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54, + 0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f, + 0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef, + 0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7, + 0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7, + 0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7, + 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -400,6 +407,34 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Payloads) > 0 { + for k, _ := range m.Payloads { + dAtA[i] = 0x1a + i++ + v := m.Payloads[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovDiff(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize + i = encodeVarintDiff(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintDiff(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDiff(dAtA, i, uint64(v.Size())) + n2, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -425,11 +460,11 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size())) - n2, err := m.Applied.MarshalTo(dAtA[i:]) + n3, err := m.Applied.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -530,11 +565,11 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size())) - n3, err := m.Diff.MarshalTo(dAtA[i:]) + n4, err := m.Diff.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -567,6 +602,19 @@ func (m *ApplyRequest) Size() (n int) { n += 1 + l + sovDiff(uint64(l)) } } + if len(m.Payloads) > 0 { + for k, v := range m.Payloads { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDiff(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -662,9 +710,20 @@ func (this *ApplyRequest) String() string { if this == nil { return "nil" } + keysForPayloads := make([]string, 0, len(this.Payloads)) + for k, _ := range this.Payloads { + keysForPayloads = append(keysForPayloads, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads) + mapStringForPayloads := "map[string]*types1.Any{" + for _, k := range keysForPayloads { + mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k]) + } + mapStringForPayloads += "}" s := strings.Join([]string{`&ApplyRequest{`, `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`, + `Payloads:` + mapStringForPayloads + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -824,6 +883,135 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDiff + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDiff + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payloads == nil { + m.Payloads = make(map[string]*types1.Any) + } + var mapkey string + var mapvalue 
*types1.Any + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDiff + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDiff + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDiff + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDiff + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &types1.Any{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDiff(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDiff + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Payloads[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDiff(dAtA[iNdEx:]) diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto index 66d7ecb19f6b3..ae2707a258cc3 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package containerd.services.diff.v1; import weak "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; @@ -25,6 +26,8 @@ message ApplyRequest { containerd.types.Descriptor diff = 1; repeated containerd.types.Mount mounts = 2; + + map<string, google.protobuf.Any> payloads = 3; } message ApplyResponse { diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go index 016ced4b78eb9..a4e238685efb4 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go @@ -10,6 +10,7 @@ import ( rpc "github.com/gogo/googleapis/google/rpc" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc
"google.golang.org/grpc" io "io" math "math" @@ -191,11 +192,51 @@ func (m *PluginsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo +type ServerResponse struct { + UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerResponse) Reset() { *m = ServerResponse{} } +func (*ServerResponse) ProtoMessage() {} +func (*ServerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1a14fda866f10715, []int{3} +} +func (m *ServerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerResponse.Merge(m, src) +} +func (m *ServerResponse) XXX_Size() int { + return m.Size() +} +func (m *ServerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerResponse proto.InternalMessageInfo + func init() { proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin") proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry") proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest") proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse") + proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse") } func init() { @@ -203,38 +244,42 @@ func init() { } var fileDescriptor_1a14fda866f10715 = []byte{ - // 487 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21, - 0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10, - 0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb, - 0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b, - 0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda, - 0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22, - 0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08, - 0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5, - 0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c, - 0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e, - 0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a, - 0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89, - 0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d, - 0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49, - 0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 
0x93, - 0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67, - 0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32, - 0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b, - 0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea, - 0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5, - 0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87, - 0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e, - 0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e, - 0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c, - 0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82, - 0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe, - 0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78, - 0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc, - 0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6, - 0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00, + // 549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10, + 0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08, + 0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47, + 0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 0x12, 0x71, 0x9b, 0x79, + 0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e, + 0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62, + 0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71, + 0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b, + 0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34, + 0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3, + 0x74, 0xea, 0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d, + 0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4, + 0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65, + 0xcb, 0x1c, 0x5e, 0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d, + 0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad, + 0xdb, 0xec, 0x39, 0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 0xf5, 0xc5, 0xb2, 0xb5, + 0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1, + 0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07, + 0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe, + 0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d, + 0x45, 0x9c, 0x85, 0x68, 
0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6, + 0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88, + 0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c, + 0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a, + 0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a, + 0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c, + 0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd, + 0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac, + 0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0, + 0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3, + 0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4, + 0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d, + 0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50, + 0xdc, 0x89, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -254,6 +299,8 @@ type IntrospectionClient interface { // Clients can use this to detect features and capabilities when using // containerd. Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) } type introspectionClient struct { @@ -273,6 +320,15 @@ func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, o return out, nil } +func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { + out := new(ServerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // IntrospectionServer is the server API for Introspection service. type IntrospectionServer interface { // Plugins returns a list of plugins in containerd. @@ -280,6 +336,8 @@ type IntrospectionServer interface { // Clients can use this to detect features and capabilities when using // containerd. 
Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(context.Context, *types1.Empty) (*ServerResponse, error) } func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) { @@ -304,6 +362,24 @@ func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(types1.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Server(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Server", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty)) + } + return interceptor(ctx, in, info, handler) +} + var _Introspection_serviceDesc = grpc.ServiceDesc{ ServiceName: "containerd.services.introspection.v1.Introspection", HandlerType: (*IntrospectionServer)(nil), @@ -312,6 +388,10 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{ MethodName: "Plugins", Handler: _Introspection_Plugins_Handler, }, + { + MethodName: "Server", + Handler: _Introspection_Server_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", @@ -488,6 +568,33 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ServerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UUID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID))) + i += copy(dAtA[i:], m.UUID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -583,6 +690,22 @@ func (m *PluginsResponse) Size() (n int) { return n } +func (m *ServerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UUID) + if l > 0 { + n += 1 + l + sovIntrospection(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovIntrospection(x uint64) (n int) { for { n++ @@ -645,6 +768,17 @@ func (this *PluginsResponse) String() string { }, "") return s } +func (this *ServerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerResponse{`, + `UUID:` + fmt.Sprintf("%v", this.UUID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func valueToStringIntrospection(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1206,6 +1340,92 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *ServerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIntrospection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIntrospection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIntrospection(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipIntrospection(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto index 95e804b9b7e41..79cee9a5721aa 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto @@ -4,6 +4,7 @@ package containerd.services.introspection.v1; import "github.com/containerd/containerd/api/types/platform.proto"; import "google/rpc/status.proto"; +import "google/protobuf/empty.proto"; import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection"; @@ -14,6 +15,8 @@ service Introspection { // Clients can use this to detect features and capabilities when using // containerd. 
rpc Plugins(PluginsRequest) returns (PluginsResponse); + // Server returns information about the containerd server + rpc Server(google.protobuf.Empty) returns (ServerResponse); } message Plugin { @@ -79,3 +82,7 @@ message PluginsRequest { message PluginsResponse { repeated Plugin plugins = 1 [(gogoproto.nullable) = false]; } + +message ServerResponse { + string uuid = 1 [(gogoproto.customname) = "UUID"]; +} diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go index 3cf21383ec81c..9a3313ef5b3be 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go @@ -275,6 +275,207 @@ func (m *ListResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ListResponse proto.InternalMessageInfo +type Resource struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // For snapshotter resource, there are many snapshotter types here, like + // overlayfs, devmapper etc. The type will be formatted with type, + // like "snapshotter/overlayfs". + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{6} +} +func (m *Resource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return m.Size() +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +type AddResourceRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddResourceRequest) Reset() { *m = AddResourceRequest{} } +func (*AddResourceRequest) ProtoMessage() {} +func (*AddResourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{7} +} +func (m *AddResourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddResourceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AddResourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddResourceRequest.Merge(m, src) +} +func (m *AddResourceRequest) XXX_Size() int { + return m.Size() +} +func (m *AddResourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddResourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddResourceRequest proto.InternalMessageInfo + +type 
DeleteResourceRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResourceRequest) Reset() { *m = DeleteResourceRequest{} } +func (*DeleteResourceRequest) ProtoMessage() {} +func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{8} +} +func (m *DeleteResourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteResourceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteResourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResourceRequest.Merge(m, src) +} +func (m *DeleteResourceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteResourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteResourceRequest proto.InternalMessageInfo + +type ListResourcesRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcesRequest) Reset() { *m = ListResourcesRequest{} } +func (*ListResourcesRequest) ProtoMessage() {} +func (*ListResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{9} +} +func (m *ListResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListResourcesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcesRequest.Merge(m, src) +} +func (m *ListResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *ListResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcesRequest proto.InternalMessageInfo + +type ListResourcesResponse struct { + Resources []Resource `protobuf:"bytes,1,rep,name=resources,proto3" json:"resources"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcesResponse) Reset() { *m = ListResourcesResponse{} } +func (*ListResourcesResponse) ProtoMessage() {} +func (*ListResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{10} +} +func (m *ListResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListResourcesResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ListResourcesResponse.Merge(m, src) +} +func (m *ListResourcesResponse) XXX_Size() int { + return m.Size() +} +func (m *ListResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcesResponse proto.InternalMessageInfo + func init() { proto.RegisterType((*Lease)(nil), "containerd.services.leases.v1.Lease") proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.Lease.LabelsEntry") @@ -284,6 +485,11 @@ func init() { proto.RegisterType((*DeleteRequest)(nil), "containerd.services.leases.v1.DeleteRequest") proto.RegisterType((*ListRequest)(nil), "containerd.services.leases.v1.ListRequest") proto.RegisterType((*ListResponse)(nil), "containerd.services.leases.v1.ListResponse") + proto.RegisterType((*Resource)(nil), "containerd.services.leases.v1.Resource") + proto.RegisterType((*AddResourceRequest)(nil), "containerd.services.leases.v1.AddResourceRequest") + proto.RegisterType((*DeleteResourceRequest)(nil), "containerd.services.leases.v1.DeleteResourceRequest") + proto.RegisterType((*ListResourcesRequest)(nil), "containerd.services.leases.v1.ListResourcesRequest") + proto.RegisterType((*ListResourcesResponse)(nil), "containerd.services.leases.v1.ListResourcesResponse") } func init() { @@ -291,40 +497,48 @@ func init() { } var fileDescriptor_fefd70dfe8d93cbf = []byte{ - // 515 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xdf, 0x8a, 0xd3, 0x40, - 0x14, 0xc6, 0x3b, 0xe9, 0x36, 0x6e, 0x4f, 0x5d, 0x91, 0x61, 0x59, 0x4a, 0xc4, 0xb4, 0x04, 0xc1, - 0xe2, 0x9f, 0x89, 0x5b, 0x6f, 0xd6, 0x5d, 0x11, 0xec, 0x76, 0x41, 0x21, 0x88, 0x04, 0x2f, 0x16, - 0x6f, 0x96, 0x34, 0x3d, 0x1b, 0x83, 0x69, 0x12, 0x33, 0xd3, 0x42, 0xef, 0x7c, 0x04, 0x1f, 0xc1, - 0x87, 0xf0, 0x21, 0x7a, 0xe9, 0xa5, 0x57, 0xab, 0x9b, 0x3b, 0xdf, 0x42, 0x32, 0x93, 0xb0, 0x7f, - 0x44, 0x5b, 0x65, 0xef, 0xce, 0xcc, 0x7c, 0xdf, 0x99, 0xdf, 0xf9, 0xc2, 0x04, 0x86, 0x41, 0x28, - 0xde, 0x4d, 0x47, 0xcc, 0x4f, 0x26, 0xb6, 0x9f, 0xc4, 0xc2, 0x0b, 0x63, 0xcc, 0xc6, 0xe7, 0x4b, - 0x2f, 0x0d, 0x6d, 0x8e, 0xd9, 0x2c, 0xf4, 0x91, 0xdb, 0x11, 0x7a, 0x1c, 0xb9, 0x3d, 0xdb, 0x2e, - 0x2b, 0x96, 0x66, 0x89, 0x48, 0xe8, 0xed, 0x33, 0x3d, 0xab, 0xb4, 0xac, 0x54, 0xcc, 0xb6, 0x8d, - 0xcd, 0x20, 0x09, 0x12, 0xa9, 0xb4, 0x8b, 0x4a, 0x99, 0x8c, 0x5b, 0x41, 0x92, 0x04, 0x11, 0xda, - 0x72, 0x35, 0x9a, 0x1e, 0xdb, 0x38, 0x49, 0xc5, 0xbc, 0x3c, 0xec, 0x5c, 0x3e, 0x14, 0xe1, 0x04, - 0xb9, 0xf0, 0x26, 0xa9, 0x12, 0x58, 0x3f, 0x09, 0x34, 0x9c, 0xe2, 0x06, 0xba, 0x05, 0x5a, 0x38, - 0x6e, 0x93, 0x2e, 0xe9, 0x35, 0x07, 0x7a, 0x7e, 0xd2, 0xd1, 0x5e, 0x0e, 0x5d, 0x2d, 0x1c, 0xd3, - 0x7d, 0x00, 0x3f, 0x43, 0x4f, 0xe0, 0xf8, 0xc8, 0x13, 0x6d, 0xad, 0x4b, 0x7a, 0xad, 0xbe, 0xc1, - 0x54, 0x5f, 0x56, 0xf5, 0x65, 0x6f, 0xaa, 0xbe, 0x83, 0xf5, 0xc5, 0x49, 0xa7, 0xf6, 0xe9, 0x7b, - 0x87, 0xb8, 0xcd, 0xd2, 0xf7, 0x5c, 0xd0, 0x17, 0xa0, 0x47, 0xde, 0x08, 0x23, 0xde, 0xae, 0x77, - 0xeb, 0xbd, 0x56, 0xff, 0x11, 0xfb, 0xeb, 0xa8, 0x4c, 0x22, 0x31, 0x47, 0x5a, 0x0e, 0x62, 0x91, - 0xcd, 0xdd, 0xd2, 0x6f, 0x3c, 0x81, 0xd6, 0xb9, 0x6d, 0x7a, 0x13, 0xea, 0xef, 0x71, 0xae, 0xb0, - 0xdd, 0xa2, 0xa4, 0x9b, 0xd0, 0x98, 0x79, 0xd1, 0x14, 0x25, 0x6a, 0xd3, 0x55, 0x8b, 0x5d, 0x6d, - 0x87, 0x58, 0x5f, 0x08, 0x6c, 0xec, 0x4b, 0x24, 0x17, 0x3f, 0x4c, 0x91, 0x8b, 0x3f, 0xce, 0xfc, - 0xfa, 0x12, 0xee, 0xce, 0x12, 0xdc, 0x0b, 0x5d, 0xaf, 0x1a, 0xdb, 0x81, 0x1b, 0x55, 0x7f, 0x9e, - 0x26, 0x31, 0x47, 0xba, 0x0b, 0x0d, 0x79, 0xb7, 
0xf4, 0xb7, 0xfa, 0x77, 0x56, 0x09, 0xd3, 0x55, - 0x16, 0x6b, 0x0f, 0x36, 0x86, 0x18, 0xe1, 0xf2, 0x0c, 0x28, 0xac, 0xf1, 0x79, 0xec, 0x4b, 0x9e, - 0x75, 0x57, 0xd6, 0xd6, 0x5d, 0x68, 0x39, 0x21, 0x17, 0x95, 0xb5, 0x0d, 0xd7, 0x8e, 0xc3, 0x48, - 0x60, 0xc6, 0xdb, 0xa4, 0x5b, 0xef, 0x35, 0xdd, 0x6a, 0x69, 0x39, 0x70, 0x5d, 0x09, 0x4b, 0xe2, - 0xa7, 0xa0, 0x2b, 0x1e, 0x29, 0x5c, 0x15, 0xb9, 0xf4, 0xf4, 0x3f, 0x6b, 0xa0, 0xcb, 0x1d, 0x4e, - 0x11, 0x74, 0x15, 0x06, 0x7d, 0xf0, 0x2f, 0xdf, 0xc4, 0x78, 0xb8, 0xa2, 0xba, 0xe4, 0x7d, 0x05, - 0xba, 0x4a, 0x69, 0xe9, 0x35, 0x17, 0xc2, 0x34, 0xb6, 0x7e, 0x7b, 0x18, 0x07, 0xc5, 0x6b, 0xa4, - 0x47, 0xb0, 0x56, 0xe4, 0x41, 0xef, 0x2d, 0x9b, 0xfb, 0x2c, 0x5d, 0xe3, 0xfe, 0x4a, 0x5a, 0x05, - 0x3c, 0x38, 0x5c, 0x9c, 0x9a, 0xb5, 0x6f, 0xa7, 0x66, 0xed, 0x63, 0x6e, 0x92, 0x45, 0x6e, 0x92, - 0xaf, 0xb9, 0x49, 0x7e, 0xe4, 0x26, 0x79, 0xfb, 0xec, 0x3f, 0x7f, 0x4d, 0x7b, 0xaa, 0x3a, 0xac, - 0x8d, 0x74, 0x39, 0xcc, 0xe3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, 0x74, 0xdd, 0x12, 0xe5, - 0x04, 0x00, 0x00, + // 644 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xce, 0x26, 0xa9, 0x49, 0x26, 0xb4, 0x42, 0xab, 0xb6, 0x8a, 0x8c, 0x48, 0x22, 0x0b, 0xa9, + 0x11, 0x3f, 0x36, 0x4d, 0x2b, 0x54, 0x5a, 0x84, 0xd4, 0xb4, 0x95, 0xa8, 0x88, 0x10, 0xb2, 0x38, + 0x54, 0x1c, 0xa8, 0x1c, 0x7b, 0x1b, 0x2c, 0x9c, 0xd8, 0x78, 0x37, 0x41, 0xe9, 0x89, 0x47, 0xe0, + 0x61, 0x78, 0x88, 0x1e, 0x39, 0x21, 0x4e, 0x85, 0xe6, 0xc6, 0x5b, 0x20, 0xef, 0x0f, 0x6d, 0x5a, + 0xb5, 0x76, 0x11, 0xe2, 0x36, 0x1b, 0x7f, 0xdf, 0xcc, 0x37, 0x33, 0xdf, 0x6e, 0x60, 0xbb, 0xe7, + 0xb3, 0x77, 0xc3, 0xae, 0xe9, 0x86, 0x7d, 0xcb, 0x0d, 0x07, 0xcc, 0xf1, 0x07, 0x24, 0xf6, 0xce, + 0x86, 0x4e, 0xe4, 0x5b, 0x94, 0xc4, 0x23, 0xdf, 0x25, 0xd4, 0x0a, 0x88, 0x43, 0x09, 0xb5, 0x46, + 0xcb, 0x32, 0x32, 0xa3, 0x38, 0x64, 0x21, 0xbe, 0x73, 0x8a, 0x37, 0x15, 0xd6, 0x94, 0x88, 0xd1, + 0xb2, 0x3e, 0xdf, 0x0b, 0x7b, 0x21, 0x47, 0x5a, 0x49, 0x24, 0x48, 0xfa, 0xed, 0x5e, 0x18, 0xf6, + 0x02, 0x62, 0xf1, 0x53, 0x77, 0x78, 0x60, 0x91, 0x7e, 0xc4, 0xc6, 0xf2, 0x63, 0xfd, 0xfc, 0x47, + 0xe6, 0xf7, 0x09, 0x65, 0x4e, 0x3f, 0x12, 0x00, 0xe3, 0x17, 0x82, 0x99, 0x4e, 0x52, 0x01, 0x2f, + 0x42, 0xde, 0xf7, 0xaa, 0xa8, 0x81, 0x9a, 0xe5, 0xb6, 0x36, 0x39, 0xae, 0xe7, 0x77, 0xb7, 0xed, + 0xbc, 0xef, 0xe1, 0x2d, 0x00, 0x37, 0x26, 0x0e, 0x23, 0xde, 0xbe, 0xc3, 0xaa, 0xf9, 0x06, 0x6a, + 0x56, 0x5a, 0xba, 0x29, 0xf2, 0x9a, 0x2a, 0xaf, 0xf9, 0x5a, 0xe5, 0x6d, 0x97, 0x8e, 0x8e, 0xeb, + 0xb9, 0xcf, 0x3f, 0xea, 0xc8, 0x2e, 0x4b, 0xde, 0x26, 0xc3, 0xcf, 0x41, 0x0b, 0x9c, 0x2e, 0x09, + 0x68, 0xb5, 0xd0, 0x28, 0x34, 0x2b, 0xad, 0x47, 0xe6, 0x95, 0xad, 0x9a, 0x5c, 0x92, 0xd9, 0xe1, + 0x94, 0x9d, 0x01, 0x8b, 0xc7, 0xb6, 0xe4, 0xeb, 0x4f, 0xa0, 0x72, 0xe6, 0x67, 0x7c, 0x0b, 0x0a, + 0xef, 0xc9, 0x58, 0xc8, 0xb6, 0x93, 0x10, 0xcf, 0xc3, 0xcc, 0xc8, 0x09, 0x86, 0x84, 0x4b, 0x2d, + 0xdb, 0xe2, 0xb0, 0x9e, 0x5f, 0x43, 0xc6, 0x17, 0x04, 0xb3, 0x5b, 0x5c, 0x92, 0x4d, 0x3e, 0x0c, + 0x09, 0x65, 0x97, 0xf6, 0xfc, 0xea, 0x9c, 0xdc, 0xb5, 0x14, 0xb9, 0x53, 0x59, 0xff, 0xb5, 0xec, + 0x0e, 0xcc, 0xa9, 0xfc, 0x34, 0x0a, 0x07, 0x94, 0xe0, 0x75, 0x98, 0xe1, 0xb5, 0x39, 0xbf, 0xd2, + 0xba, 0x9b, 0x65, 0x98, 0xb6, 0xa0, 0x18, 0x1b, 0x30, 0xbb, 0x4d, 0x02, 0x92, 0x3e, 0x03, 0x0c, + 0x45, 0x3a, 0x1e, 0xb8, 0x5c, 0x4f, 0xc9, 0xe6, 0xb1, 0xb1, 0x04, 0x95, 0x8e, 0x4f, 0x99, 0xa2, + 0x56, 0xe1, 0xc6, 0x81, 0x1f, 0x30, 0x12, 0xd3, 0x2a, 0x6a, 0x14, 0x9a, 0x65, 0x5b, 0x1d, 0x8d, + 0x0e, 
0xdc, 0x14, 0x40, 0xa9, 0xf8, 0x29, 0x68, 0x42, 0x0f, 0x07, 0x66, 0x95, 0x2c, 0x39, 0xc6, + 0x63, 0x28, 0xd9, 0x84, 0x86, 0xc3, 0xd8, 0x25, 0x57, 0xc9, 0x65, 0xe3, 0x48, 0x8d, 0x8f, 0xc7, + 0xc6, 0x47, 0xc0, 0x9b, 0x9e, 0xa7, 0xa8, 0x69, 0x0d, 0xef, 0x42, 0x29, 0x96, 0x50, 0x69, 0xf3, + 0xa5, 0x14, 0x95, 0x2a, 0x73, 0xbb, 0x98, 0x78, 0xde, 0xfe, 0x43, 0x37, 0x0e, 0x61, 0x41, 0x0d, + 0xf9, 0xbf, 0xd7, 0x36, 0x61, 0x5e, 0x8e, 0x9e, 0x9f, 0x69, 0x4a, 0x69, 0xc3, 0x83, 0x85, 0x73, + 0x78, 0xb9, 0xb3, 0x17, 0x50, 0x56, 0x49, 0xd5, 0xda, 0xae, 0x29, 0xea, 0x94, 0xdf, 0xfa, 0x56, + 0x04, 0x8d, 0x2f, 0x95, 0x62, 0x02, 0x9a, 0xf0, 0x33, 0x7e, 0x70, 0x9d, 0x6b, 0xa5, 0x3f, 0xcc, + 0x88, 0x96, 0xf2, 0x5f, 0x82, 0x26, 0x76, 0x90, 0x5a, 0x66, 0xea, 0x3e, 0xe8, 0x8b, 0x17, 0xde, + 0xb6, 0x9d, 0xe4, 0x41, 0xc5, 0xfb, 0x50, 0x4c, 0xe6, 0x84, 0xef, 0xa5, 0x59, 0xf7, 0xf4, 0x82, + 0xe8, 0xf7, 0x33, 0x61, 0xa5, 0xe0, 0x3d, 0xa8, 0x9c, 0x71, 0x2b, 0x5e, 0x4e, 0xe1, 0x5e, 0x74, + 0xf6, 0xa5, 0xd2, 0xdf, 0xc2, 0xdc, 0xb4, 0x1d, 0xf1, 0x6a, 0xc6, 0x91, 0x64, 0xcb, 0x7f, 0x08, + 0xb3, 0x53, 0x16, 0xc2, 0x2b, 0xd9, 0xfa, 0x9e, 0x32, 0xa8, 0xbe, 0x7a, 0x3d, 0x92, 0x98, 0x5a, + 0x7b, 0xef, 0xe8, 0xa4, 0x96, 0xfb, 0x7e, 0x52, 0xcb, 0x7d, 0x9a, 0xd4, 0xd0, 0xd1, 0xa4, 0x86, + 0xbe, 0x4e, 0x6a, 0xe8, 0xe7, 0xa4, 0x86, 0xde, 0x3c, 0xfb, 0xcb, 0xff, 0xe4, 0x0d, 0x11, 0xed, + 0xe5, 0xba, 0x1a, 0xef, 0x73, 0xe5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xfe, 0x39, 0x67, + 0xde, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -349,6 +563,12 @@ type LeasesClient interface { // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) + // ListResources lists all the resources referenced by the lease. + ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) } type leasesClient struct { @@ -386,6 +606,33 @@ func (c *leasesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.C return out, nil } +func (c *leasesClient) AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/AddResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/DeleteResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { + out := new(ListResourcesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/ListResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // LeasesServer is the server API for Leases service. 
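The LeasesServer interface that follows mirrors these client methods one-to-one. Since AddResource, DeleteResource and ListResources are plain unary RPCs, a caller drives them the same way as Create/Delete/List. The following is a minimal sketch, not part of the patch, assuming the usual generated NewLeasesClient constructor, an already-dialed *grpc.ClientConn, and a context carrying a containerd namespace; the lease ID and resource key are hypothetical.

package main

import (
	"context"
	"fmt"

	leasesapi "github.com/containerd/containerd/api/services/leases/v1"
	"google.golang.org/grpc"
)

// addAndListLeaseResources pins one resource to an existing lease and then
// prints every resource the lease currently references.
func addAndListLeaseResources(ctx context.Context, conn *grpc.ClientConn, leaseID string) error {
	client := leasesapi.NewLeasesClient(conn) // standard generated constructor, assumed present

	// Resource is embedded by value because the proto marks it non-nullable.
	_, err := client.AddResource(ctx, &leasesapi.AddResourceRequest{
		ID: leaseID,
		Resource: leasesapi.Resource{
			ID:   "example-snapshot-key",  // hypothetical resource identifier
			Type: "snapshotter/overlayfs", // type format described in leases.proto below
		},
	})
	if err != nil {
		return err
	}

	resp, err := client.ListResources(ctx, &leasesapi.ListResourcesRequest{ID: leaseID})
	if err != nil {
		return err
	}
	for _, r := range resp.Resources {
		fmt.Printf("%s: %s\n", r.Type, r.ID)
	}
	return nil
}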
type LeasesServer interface { // Create creates a new lease for managing changes to metadata. A lease @@ -398,6 +645,12 @@ type LeasesServer interface { // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. List(context.Context, *ListRequest) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(context.Context, *AddResourceRequest) (*types.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(context.Context, *DeleteResourceRequest) (*types.Empty, error) + // ListResources lists all the resources referenced by the lease. + ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) } func RegisterLeasesServer(s *grpc.Server, srv LeasesServer) { @@ -458,6 +711,60 @@ func _Leases_List_Handler(srv interface{}, ctx context.Context, dec func(interfa return interceptor(ctx, in, info, handler) } +func _Leases_AddResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).AddResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/AddResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).AddResource(ctx, req.(*AddResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_DeleteResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).DeleteResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/DeleteResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).DeleteResource(ctx, req.(*DeleteResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).ListResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/ListResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).ListResources(ctx, req.(*ListResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Leases_serviceDesc = grpc.ServiceDesc{ ServiceName: "containerd.services.leases.v1.Leases", HandlerType: (*LeasesServer)(nil), @@ -474,6 +781,18 @@ var _Leases_serviceDesc = grpc.ServiceDesc{ MethodName: "List", Handler: _Leases_List_Handler, }, + { + MethodName: "AddResource", + Handler: _Leases_AddResource_Handler, + }, + { + MethodName: "DeleteResource", + Handler: _Leases_DeleteResource_Handler, + }, + { + MethodName: "ListResources", + Handler: _Leases_ListResources_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: 
"github.com/containerd/containerd/api/services/leases/v1/leases.proto", @@ -712,61 +1031,224 @@ func (m *ListResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeVarintLeases(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return offset + 1 + return dAtA[:n], nil } -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovLeases(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } + if len(m.Type) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *CreateRequest) Size() (n int) { - if m == nil { - return 0 +func (m *AddResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *AddResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + dAtA[i] = 0x12 + i++ + i = encodeVarintLeases(dAtA, i, uint64(m.Resource.Size())) + n3, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *DeleteResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintLeases(dAtA, i, uint64(m.Resource.Size())) + n4, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ListResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ListResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintLeases(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovLeases(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) + n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) + n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) } return n } @@ -842,6 +1324,96 @@ func (m *ListResponse) Size() (n int) { return n } +func (m *Resource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AddResourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = m.Resource.Size() + n += 1 + l + sovLeases(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteResourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = m.Resource.Size() + n += 1 + l + sovLeases(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n 
+} + +func (m *ListResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovLeases(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovLeases(x uint64) (n int) { for { n++ @@ -945,29 +1517,87 @@ func (this *ListResponse) String() string { }, "") return s } -func valueToStringLeases(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { +func (this *Resource) String() string { + if this == nil { return "nil" } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + s := strings.Join([]string{`&Resource{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s } -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ +func (this *AddResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AddResourceRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteResourceRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ListResourcesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListResourcesRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ListResourcesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListResourcesResponse{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringLeases(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break @@ -1172,7 +1802,590 @@ func (m *Lease) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Labels[mapkey] = mapvalue + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthLeases + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthLeases + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lease == nil { + m.Lease = &Lease{} + } + if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sync = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, &Lease{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1199,7 +2412,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } return nil } -func (m *CreateRequest) Unmarshal(dAtA []byte) error { +func (m *Resource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1222,10 +2435,10 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: Resource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1260,11 +2473,11 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { } m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeases @@ -1274,118 +2487,23 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthLeases } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthLeases } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1412,7 +2530,7 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *CreateResponse) Unmarshal(dAtA []byte) error { +func (m *AddResourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1435,15 +2553,47 @@ func (m *CreateResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: AddResourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AddResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for 
shift := uint(0); ; shift += 7 { @@ -1470,10 +2620,7 @@ func (m *CreateResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lease == nil { - m.Lease = &Lease{} - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1502,7 +2649,7 @@ func (m *CreateResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeleteRequest) Unmarshal(dAtA []byte) error { +func (m *DeleteResourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1525,10 +2672,10 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteResourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1564,10 +2711,10 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeases @@ -1577,12 +2724,25 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Sync = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLeases(dAtA[iNdEx:]) @@ -1608,7 +2768,7 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ListRequest) Unmarshal(dAtA []byte) error { +func (m *ListResourcesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1631,15 +2791,15 @@ func (m *ListRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ListRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ListResourcesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1667,7 +2827,7 @@ func (m *ListRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1694,7 +2854,7 @@ func (m 
*ListRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ListResponse) Unmarshal(dAtA []byte) error { +func (m *ListResourcesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1717,15 +2877,15 @@ func (m *ListResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ListResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ListResourcesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1752,8 +2912,8 @@ func (m *ListResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Leases = append(m.Leases, &Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto index 2df4b06239bc9..ac693e93ded32 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto @@ -22,6 +22,15 @@ service Leases { // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. rpc List(ListRequest) returns (ListResponse); + + // AddResource references the resource by the provided lease. + rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty); + + // DeleteResource dereferences the resource by the provided lease. + rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty); + + // ListResources lists all the resources referenced by the lease. + rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse); } // Lease is an object which retains resources while it exists. @@ -62,3 +71,32 @@ message ListRequest { message ListResponse { repeated Lease leases = 1; } + +message Resource { + string id = 1; + + // For snapshotter resource, there are many snapshotter types here, like + // overlayfs, devmapper etc. The type will be formatted with type, + // like "snapshotter/overlayfs". 
+ string type = 2; +} + +message AddResourceRequest { + string id = 1; + + Resource resource = 2 [(gogoproto.nullable) = false]; +} + +message DeleteResourceRequest { + string id = 1; + + Resource resource = 2 [(gogoproto.nullable) = false]; +} + +message ListResourcesRequest { + string id = 1; +} + +message ListResourcesResponse { + repeated Resource resources = 1 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index 60c80e98a5954..2338de6b90362 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -180,7 +180,7 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { switch compression { case Uncompressed: diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index fae023c557775..7ec46575647f5 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -19,9 +19,7 @@ package archive import ( "archive/tar" "context" - "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -91,11 +89,6 @@ const ( // archives. whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix - // whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other - // layers. Normally these should not go into exported archives and all changed - // hardlinks should be copied to the top layer. - whiteoutLinkDir = whiteoutMetaPrefix + "plnk" - // whiteoutOpaqueDir file means directory has been made opaque - meaning // readdir calls to this directory do not follow to lower layers. whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" @@ -117,11 +110,15 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int if options.Filter == nil { options.Filter = all } + if options.applyFunc == nil { + options.applyFunc = applyNaive + } - return apply(ctx, root, tar.NewReader(r), options) + return options.applyFunc(ctx, root, tar.NewReader(r), options) } -// applyNaive applies a tar stream of an OCI style diff tar. +// applyNaive applies a tar stream of an OCI style diff tar to a directory +// applying each file as either a whole file or whiteout. 
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { var ( @@ -131,11 +128,49 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO // may occur out of order unpackedPaths = make(map[string]struct{}) - // Used for aufs plink directory - aufsTempdir = "" - aufsHardlinks = make(map[string]*tar.Header) + convertWhiteout = options.ConvertWhiteout ) + if convertWhiteout == nil { + // handle whiteouts by removing the target files + convertWhiteout = func(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + if base == whiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return false, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + return false, err + } + + if strings.HasPrefix(base, whiteoutPrefix) { + originalBase := base[len(whiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + return false, os.RemoveAll(originalPath) + } + + return true, nil + } + } + // Iterate through the files in the archive. for { select { @@ -193,85 +228,21 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO if base == "" { parentPath = filepath.Dir(path) } - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = mkdirAll(parentPath, 0755) - if err != nil { - return 0, err - } + if err := mkparent(ctx, parentPath, root, options.Parents); err != nil { + return 0, err } } - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - p, err := fs.RootPath(aufsTempdir, basename) - if err != nil { - return 0, err - } - if err := createTarFile(ctx, p, root, hdr, tr); err != nil { - return 0, err - } - } - - if hdr.Name != whiteoutOpaqueDir { - continue - } + // Naive whiteout convert function which handles whiteout files by + // removing the target files. 
+ if err := validateWhiteout(path); err != nil { + return 0, err } - - if strings.HasPrefix(base, whiteoutPrefix) { - dir := filepath.Dir(path) - if base == whiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - continue - } - - originalBase := base[len(whiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - // Ensure originalPath is under dir - if dir[len(dir)-1] != filepath.Separator { - dir += string(filepath.Separator) - } - if !strings.HasPrefix(originalPath, dir) { - return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base) - } - - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } + writeFile, err := convertWhiteout(hdr, path) + if err != nil { + return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name) + } + if !writeFile { continue } // If path exits we almost always just want to remove and replace it. @@ -289,26 +260,6 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO srcData := io.Reader(tr) srcHdr := hdr - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("invalid aufs hardlink") - } - p, err := fs.RootPath(aufsTempdir, linkBasename) - if err != nil { - return 0, err - } - tmpFile, err := os.Open(p) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil { return 0, err } @@ -428,6 +379,66 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)) } +func mkparent(ctx context.Context, path, root string, parents []string) error { + if dir, err := os.Lstat(path); err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkparent", + Path: path, + Err: syscall.ENOTDIR, + } + } else if !os.IsNotExist(err) { + return err + } + + i := len(path) + for i > len(root) && !os.IsPathSeparator(path[i-1]) { + i-- + } + + if i > len(root)+1 { + if err := mkparent(ctx, path[:i-1], root, parents); err != nil { + return err + } + } + + if err := mkdir(path, 0755); err != nil { + // Check that still doesn't exist + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + + for _, p := range parents { + ppath, err := fs.RootPath(p, path[len(root):]) + if err != nil { + return err + } + + dir, err := os.Lstat(ppath) + if err == nil { + if !dir.IsDir() { + // Replaced, do not copy attributes + break + } + if err := copyDirInfo(dir, path); err != nil { + return err + } + return copyUpXAttrs(path, ppath) + } else if !os.IsNotExist(err) { + return err + } + } + + log.G(ctx).Debugf("parent directory %q not found: default permissions(0755) used", path) + + return nil +} + type 
changeWriter struct { tw *tar.Writer source string @@ -493,6 +504,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + // truncate timestamp for compatibility. without PAX stdlib rounds timestamps instead + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + name := p if strings.HasPrefix(name, string(filepath.Separator)) { name, err = filepath.Rel(string(filepath.Separator), name) @@ -598,6 +615,9 @@ func (cw *changeWriter) Close() error { } func (cw *changeWriter) includeParents(hdr *tar.Header) error { + if cw.addedDirs == nil { + return nil + } name := strings.TrimRight(hdr.Name, "/") fname := filepath.Join(cw.source, name) parent := filepath.Dir(name) @@ -684,3 +704,26 @@ func hardlinkRootPath(root, linkname string) (string, error) { } return targetPath, nil } + +func validateWhiteout(path string) error { + base := filepath.Base(path) + dir := filepath.Dir(path) + + if base == whiteoutOpaqueDir { + return nil + } + + if strings.HasPrefix(base, whiteoutPrefix) { + originalBase := base[len(whiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + // Ensure originalPath is under dir + if dir[len(dir)-1] != filepath.Separator { + dir += string(filepath.Separator) + } + if !strings.HasPrefix(originalPath, dir) { + return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base) + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts.go b/vendor/github.com/containerd/containerd/archive/tar_opts.go index a08bc102a7fe4..ca419e112e279 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_opts.go +++ b/vendor/github.com/containerd/containerd/archive/tar_opts.go @@ -16,7 +16,19 @@ package archive -import "archive/tar" +import ( + "archive/tar" + "context" +) + +// ApplyOptions provides additional options for an Apply operation +type ApplyOptions struct { + Filter Filter // Filter tar headers + ConvertWhiteout ConvertWhiteout // Convert whiteout files + Parents []string // Parent directories to handle inherited attributes without CoW + + applyFunc func(context.Context, string, *tar.Reader, ApplyOptions) (int64, error) +} // ApplyOpt allows setting mutable archive apply properties on creation type ApplyOpt func(options *ApplyOptions) error @@ -24,6 +36,9 @@ type ApplyOpt func(options *ApplyOptions) error // Filter specific files from the archive type Filter func(*tar.Header) (bool, error) +// ConvertWhiteout converts whiteout files from the archive +type ConvertWhiteout func(*tar.Header, string) (bool, error) + // all allows all files func all(_ *tar.Header) (bool, error) { return true, nil @@ -36,3 +51,24 @@ func WithFilter(f Filter) ApplyOpt { return nil } } + +// WithConvertWhiteout uses the convert function to convert the whiteout files. +func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt { + return func(options *ApplyOptions) error { + options.ConvertWhiteout = c + return nil + } +} + +// WithParents provides parent directories for resolving inherited attributes +// directory from the filesystem. +// Inherited attributes are searched from first to last, making the first +// element in the list the most immediate parent directory. +// NOTE: When applying to a filesystem which supports CoW, file attributes +// should be inherited by the filesystem. 
+func WithParents(p []string) ApplyOpt { + return func(options *ApplyOptions) error { + options.Parents = p + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go b/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go new file mode 100644 index 0000000000000..38ef9e9bc7893 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go @@ -0,0 +1,59 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" +) + +// AufsConvertWhiteout converts whiteout files for aufs. +func AufsConvertWhiteout(_ *tar.Header, _ string) (bool, error) { + return true, nil +} + +// OverlayConvertWhiteout converts whiteout files for overlay. +func OverlayConvertWhiteout(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque, we need to translate that to overlay + if base == whiteoutOpaqueDir { + // don't write the file itself + return false, unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, whiteoutPrefix) { + originalBase := base[len(whiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + // don't write the file itself + return false, os.Chown(originalPath, hdr.Uid, hdr.Gid) + } + + return true, nil +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go b/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go index e4b15a1634b5c..f472013bc92ac 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go @@ -18,28 +18,12 @@ package archive -// ApplyOptions provides additional options for an Apply operation -type ApplyOptions struct { - ParentLayerPaths []string // Parent layer paths used for Windows layer apply - IsWindowsContainerLayer bool // True if the tar stream to be applied is a Windows Container Layer - Filter Filter // Filter tar headers -} - -// WithParentLayers adds parent layers to the apply process this is required -// for all Windows layers except the base layer. -func WithParentLayers(parentPaths []string) ApplyOpt { - return func(options *ApplyOptions) error { - options.ParentLayerPaths = parentPaths - return nil - } -} - // AsWindowsContainerLayer indicates that the tar stream to apply is that of // a Windows Container Layer. The caller must be holding SeBackupPrivilege and // SeRestorePrivilege. 
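Taken together, the ApplyOpt helpers above replace the old hard-coded AUFS handling: the caller now chooses a whiteout conversion and, optionally, parent directories for attribute inheritance. A minimal sketch, not part of the patch, of how an overlay-backed Linux snapshotter might combine them; the directory paths and layer reader are placeholders.

package main

import (
	"context"
	"io"

	"github.com/containerd/containerd/archive"
)

// applyOverlayLayer applies one layer tar onto an overlay upper directory,
// translating whiteout entries into overlayfs metadata and copying directory
// attributes down from the parent layers when missing parents are created.
func applyOverlayLayer(ctx context.Context, upperDir string, parents []string, layer io.Reader) (int64, error) {
	return archive.Apply(ctx, upperDir, layer,
		// Turn .wh. entries into overlayfs char devices / trusted.overlay.opaque xattrs.
		archive.WithConvertWhiteout(archive.OverlayConvertWhiteout),
		// First element is the most immediate parent directory (see WithParents above).
		archive.WithParents(parents),
	)
}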
func AsWindowsContainerLayer() ApplyOpt { return func(options *ApplyOptions) error { - options.IsWindowsContainerLayer = true + options.applyFunc = applyWindowsLayer return nil } } diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index 022dd6d4f4946..e872187530d18 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -20,11 +20,12 @@ package archive import ( "archive/tar" - "context" "os" + "strings" "sync" "syscall" + "github.com/containerd/continuity/fs" "github.com/containerd/continuity/sysx" "github.com/opencontainers/runc/libcontainer/system" "github.com/pkg/errors" @@ -74,10 +75,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { return f, err } -func mkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - func mkdir(path string, perm os.FileMode) error { if err := os.Mkdir(path, perm); err != nil { return err @@ -149,11 +146,71 @@ func getxattr(path, attr string) ([]byte, error) { } func setxattr(path, key, value string) error { - return sysx.LSetxattr(path, key, []byte(value), 0) + // Do not set trusted attributes + if strings.HasPrefix(key, "trusted.") { + return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported") + } + return unix.Lsetxattr(path, key, []byte(value), 0) +} + +func copyDirInfo(fi os.FileInfo, path string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(path, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. + if dstStat, err2 := os.Lstat(path); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", path) + } + } + + if err := os.Chmod(path, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", path) + } + + timespec := []unix.Timespec{unix.Timespec(fs.StatAtime(st)), unix.Timespec(fs.StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", path) + } + + return nil } -// apply applies a tar stream of an OCI style diff tar. 
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets -func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { - return applyNaive(ctx, root, tr, options) +func copyUpXAttrs(dst, src string) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + if err == unix.ENOTSUP || err == sysx.ENODATA { + return nil + } + return errors.Wrapf(err, "failed to list xattrs on %s", src) + } + for _, xattr := range xattrKeys { + // Do not copy up trusted attributes + if strings.HasPrefix(xattr, "trusted.") { + continue + } + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + if err == unix.ENOTSUP || err == sysx.ENODATA { + continue + } + return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + } + if err := unix.Lsetxattr(dst, xattr, data, unix.XATTR_CREATE); err != nil { + if err == unix.ENOTSUP || err == unix.ENODATA || err == unix.EEXIST { + continue + } + return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + } + } + + return nil } diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go index b97631fcc5aaa..a5c6da694119f 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -23,7 +23,6 @@ import ( "bufio" "context" "encoding/base64" - "errors" "fmt" "io" "os" @@ -36,6 +35,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim" "github.com/containerd/containerd/sys" + "github.com/pkg/errors" ) const ( @@ -107,10 +107,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { return sys.OpenFileSequential(name, flag, perm) } -func mkdirAll(path string, perm os.FileMode) error { - return sys.MkdirAll(path, perm) -} - func mkdir(path string, perm os.FileMode) error { return os.Mkdir(path, perm) } @@ -153,16 +149,8 @@ func setxattr(path, key, value string) error { return errors.New("xattrs not supported on Windows") } -// apply applies a tar stream of an OCI style diff tar of a Windows layer. -// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets -func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { - if options.IsWindowsContainerLayer { - return applyWindowsLayer(ctx, root, tr, options) - } - return applyNaive(ctx, root, tr, options) -} - -// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer. +// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows +// layer using the hcsshim layer writer and backup streams. 
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { home, id := filepath.Split(root) @@ -170,7 +158,7 @@ func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options HomeDir: home, } - w, err := hcsshim.NewLayerWriter(info, id, options.ParentLayerPaths) + w, err := hcsshim.NewLayerWriter(info, id, options.Parents) if err != nil { return 0, err } @@ -443,3 +431,14 @@ func writeBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( } } } + +func copyDirInfo(fi os.FileInfo, path string) error { + if err := os.Chmod(path, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", path) + } + return nil +} + +func copyUpXAttrs(dst, src string) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go index fd8d98bf30935..e05ca719c2e6e 100644 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error { utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrap(err, "failed call to UtimesNanoAt") + return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path) } return nil diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index 133bfcdbe8ee5..c7cf4f0bcb9b8 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -18,10 +18,13 @@ package cio import ( "context" + "errors" "fmt" "io" "net/url" "os" + "path/filepath" + "strings" "sync" "github.com/containerd/containerd/defaults" @@ -242,17 +245,24 @@ func LogURI(uri *url.URL) Creator { // BinaryIO forwards container STDOUT|STDERR directly to a logging binary func BinaryIO(binary string, args map[string]string) Creator { return func(_ string) (IO, error) { + binary = filepath.Clean(binary) + if !strings.HasPrefix(binary, "/") { + return nil, errors.New("absolute path needed") + } uri := &url.URL{ Scheme: "binary", - Host: binary, + Path: binary, } + q := uri.Query() for k, v := range args { - uri.Query().Set(k, v) + q.Set(k, v) } + uri.RawQuery = q.Encode() + res := uri.String() return &logURI{ config: Config{ - Stdout: uri.String(), - Stderr: uri.String(), + Stdout: res, + Stderr: res, }, }, nil } @@ -262,14 +272,19 @@ func BinaryIO(binary string, args map[string]string) Creator { // If the log file already exists, the logs will be appended to the file. 
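Both creators now demand a cleaned absolute path and carry it in the URL path rather than the host field. A small sketch of wiring a task's output through them; /usr/local/bin/task-logger and /var/log/demo-task.log are made-up paths:

package demo

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
)

// newLoggedTask forwards the task's STDOUT/STDERR to a logging binary.
// A relative path such as "task-logger" now fails with "absolute path needed".
func newLoggedTask(ctx context.Context, c containerd.Container) (containerd.Task, error) {
	return c.NewTask(ctx, cio.BinaryIO("/usr/local/bin/task-logger", map[string]string{"tag": "demo"}))
}

// newFileLoggedTask appends the task's output to a log file instead.
func newFileLoggedTask(ctx context.Context, c containerd.Container) (containerd.Task, error) {
	return c.NewTask(ctx, cio.LogFile("/var/log/demo-task.log"))
}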
func LogFile(path string) Creator { return func(_ string) (IO, error) { + path = filepath.Clean(path) + if !strings.HasPrefix(path, "/") { + return nil, errors.New("absolute path needed") + } uri := &url.URL{ Scheme: "file", - Host: path, + Path: path, } + res := uri.String() return &logURI{ config: Config{ - Stdout: uri.String(), - Stderr: uri.String(), + Stdout: res, + Stderr: res, }, }, nil } diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index eb2ada80bfee1..42d320933bf8c 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -72,17 +72,19 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { } var wg = &sync.WaitGroup{} - wg.Add(1) - go func() { - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - - io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) - pipes.Stdout.Close() - wg.Done() - }() + if fifos.Stdout != "" { + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) + pipes.Stdout.Close() + wg.Done() + }() + } - if !fifos.Terminal { + if !fifos.Terminal && fifos.Stderr != "" { wg.Add(1) go func() { p := bufPool.Get().(*[]byte) diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index ff78f7e776437..99141e2db52cc 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -43,6 +43,7 @@ import ( "github.com/containerd/containerd/content" contentproxy "github.com/containerd/containerd/content/proxy" "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" @@ -56,6 +57,7 @@ import ( "github.com/containerd/containerd/snapshots" snproxy "github.com/containerd/containerd/snapshots/proxy" "github.com/containerd/typeurl" + "github.com/gogo/protobuf/types" ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -86,13 +88,23 @@ func New(address string, opts ...ClientOpt) (*Client, error) { if copts.timeout == 0 { copts.timeout = 10 * time.Second } - rt := fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS) + + c := &Client{ + defaultns: copts.defaultns, + } + if copts.defaultRuntime != "" { - rt = copts.defaultRuntime + c.runtime = copts.defaultRuntime + } else { + c.runtime = defaults.DefaultRuntime } - c := &Client{ - runtime: rt, + + if copts.defaultPlatform != nil { + c.platform = copts.defaultPlatform + } else { + c.platform = platforms.Default() } + if copts.services != nil { c.services = *copts.services } @@ -102,7 +114,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) { grpc.WithInsecure(), grpc.FailOnNonTempDialError(true), grpc.WithBackoffMaxDelay(3 * time.Second), - grpc.WithDialer(dialer.Dialer), + grpc.WithContextDialer(dialer.ContextDialer), // TODO(stevvooe): We may need to allow configuration of this on the client. 
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), @@ -134,19 +146,15 @@ func New(address string, opts ...ClientOpt) (*Client, error) { c.conn, c.connector = conn, connector } if copts.services == nil && c.conn == nil { - return nil, errors.New("no grpc connection or services is available") + return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available") } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - namespaces := c.NamespaceService() - ctx := context.Background() - if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil { - if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok { - c.runtime = defaultRuntime - } - } else { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err + } else if label != "" { + c.runtime = label } } @@ -163,20 +171,17 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) { } } c := &Client{ - conn: conn, - runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), + defaultns: copts.defaultns, + conn: conn, + runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - namespaces := c.NamespaceService() - ctx := context.Background() - if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil { - if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok { - c.runtime = defaultRuntime - } - } else { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err + } else if label != "" { + c.runtime = label } } @@ -193,13 +198,15 @@ type Client struct { connMu sync.Mutex conn *grpc.ClientConn runtime string + defaultns string + platform platforms.MatchComparer connector func() (*grpc.ClientConn, error) } // Reconnect re-establishes the GRPC connection to the containerd daemon func (c *Client) Reconnect() error { if c.connector == nil { - return errors.New("unable to reconnect to containerd, no connector available") + return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available") } c.connMu.Lock() defer c.connMu.Unlock() @@ -222,10 +229,10 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return false, errors.New("no grpc connection available") + return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() - r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false)) + r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true)) if err != nil { return false, err } @@ -294,10 +301,14 @@ type RemoteContext struct { PlatformMatcher platforms.MatchComparer // Unpack is done after an image is pulled to extract into a snapshotter. + // It is done simultaneously for schema 2 images when they are pulled. // If an image is not unpacked on pull, it can be unpacked any time // afterwards. Unpacking is required to run an image. Unpack bool + // UnpackOpts handles options to the unpack call. 
+ UnpackOpts []UnpackOpt + // Snapshotter used for unpacking Snapshotter string @@ -329,9 +340,8 @@ type RemoteContext struct { // MaxConcurrentDownloads is the max concurrent content downloads for each pull. MaxConcurrentDownloads int - // AppendDistributionSourceLabel allows fetcher to add distribute source - // label for each blob content, which doesn't work for legacy schema1. - AppendDistributionSourceLabel bool + // AllMetadata downloads all manifests and known-configuration files + AllMetadata bool } func defaultRemoteContext() *RemoteContext { @@ -339,7 +349,6 @@ func defaultRemoteContext() *RemoteContext { Resolver: docker.NewResolver(docker.ResolverOptions{ Client: http.DefaultClient, }), - Snapshotter: DefaultSnapshotter, } } @@ -354,7 +363,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag } if fetchCtx.Unpack { - return images.Image{}, errors.New("unpack on fetch not supported, try pull") + return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull") } if fetchCtx.PlatformMatcher == nil { @@ -407,6 +416,11 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, } } + // Annotate ref with digest to push only push tag for single digest + if !strings.Contains(ref, "@") { + ref = ref + "@" + desc.Digest.String() + } + pusher, err := pushCtx.Resolver.Pusher(ctx, ref) if err != nil { return err @@ -490,6 +504,27 @@ func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref s return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels)) } +// GetLabel gets a label value from namespace store +// If there is no default label, an empty string returned with nil error +func (c *Client) GetLabel(ctx context.Context, label string) (string, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + if c.defaultns == "" { + return "", err + } + ns = c.defaultns + } + + srv := c.NamespaceService() + labels, err := srv.Labels(ctx, ns) + if err != nil { + return "", err + } + + value := labels[label] + return value, nil +} + // Subscribe to events that match one or more of the provided filters. // // Callers should listen on both the envelope and errs channels. 
If the errs @@ -543,6 +578,10 @@ func (c *Client) ContentStore() content.Store { // SnapshotService returns the underlying snapshotter for the provided snapshotter name func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { + snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName) + if err != nil { + snapshotterName = DefaultSnapshotter + } if c.snapshotters != nil { return c.snapshotters[snapshotterName] } @@ -642,7 +681,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return Version{}, errors.New("no grpc connection available") + return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) @@ -655,6 +694,58 @@ func (c *Client) Version(ctx context.Context) (Version, error) { }, nil } +type ServerInfo struct { + UUID string +} + +func (c *Client) Server(ctx context.Context) (ServerInfo, error) { + c.connMu.Lock() + if c.conn == nil { + c.connMu.Unlock() + return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") + } + c.connMu.Unlock() + + response, err := c.IntrospectionService().Server(ctx, &types.Empty{}) + if err != nil { + return ServerInfo{}, err + } + return ServerInfo{ + UUID: response.UUID, + }, nil +} + +func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) { + if name == "" { + label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel) + if err != nil { + return "", err + } + + if label != "" { + name = label + } else { + name = DefaultSnapshotter + } + } + + return name, nil +} + +func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Snapshotter, error) { + name, err := c.resolveSnapshotterName(ctx, name) + if err != nil { + return nil, err + } + + s := c.SnapshotService(name) + if s == nil { + return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name) + } + + return s, nil +} + // CheckRuntime returns true if the current runtime matches the expected // runtime. 
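A rough sketch of the reworked client surface, assuming the long-standing WithDefaultNamespace and WithPullUnpack options; the socket path and image reference are illustrative:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("default"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := context.Background()

	// AllMetadata pulls every manifest plus known configuration blobs; Unpack
	// extracts into the snapshotter resolved from the namespace label, falling
	// back to DefaultSnapshotter.
	if _, err := client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithAllMetadata()); err != nil {
		log.Fatal(err)
	}

	// The namespace label consulted by resolveSnapshotterName can be read directly.
	snap, err := client.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("default snapshotter label: %q", snap)
}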
Providing various parts of the runtime schema will match those // parts of the expected runtime diff --git a/vendor/github.com/containerd/containerd/client_opts.go b/vendor/github.com/containerd/containerd/client_opts.go index ed2ff05d5a469..6f485c18dcb10 100644 --- a/vendor/github.com/containerd/containerd/client_opts.go +++ b/vendor/github.com/containerd/containerd/client_opts.go @@ -26,11 +26,12 @@ import ( ) type clientOpts struct { - defaultns string - defaultRuntime string - services *services - dialOptions []grpc.DialOption - timeout time.Duration + defaultns string + defaultRuntime string + defaultPlatform platforms.MatchComparer + services *services + dialOptions []grpc.DialOption + timeout time.Duration } // ClientOpt allows callers to set options on the containerd client @@ -55,6 +56,14 @@ func WithDefaultRuntime(rt string) ClientOpt { } } +// WithDefaultPlatform sets the default platform matcher on the client +func WithDefaultPlatform(platform platforms.MatchComparer) ClientOpt { + return func(c *clientOpts) error { + c.defaultPlatform = platform + return nil + } +} + // WithDialOpts allows grpc.DialOptions to be set on the connection func WithDialOpts(opts []grpc.DialOption) ClientOpt { return func(c *clientOpts) error { @@ -195,11 +204,10 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt { } } -// WithAppendDistributionSourceLabel allows fetcher to add distribute source -// label for each blob content, which doesn't work for legacy schema1. -func WithAppendDistributionSourceLabel() RemoteOpt { +// WithAllMetadata downloads all manifests and known-configuration files +func WithAllMetadata() RemoteOpt { return func(_ *Client, c *RemoteContext) error { - c.AppendDistributionSourceLabel = true + c.AllMetadata = true return nil } } diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index 2073d40b45d6e..fd880d0e0a388 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -25,6 +25,7 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" + tasktypes "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" @@ -49,7 +50,7 @@ type Container interface { // ID identifies the container ID() string // Info returns the underlying container record type - Info(context.Context) (containers.Container, error) + Info(context.Context, ...InfoOpts) (containers.Container, error) // Delete removes the container Delete(context.Context, ...DeleteOpts) error // NewTask creates a new task based on the container metadata @@ -80,16 +81,18 @@ type Container interface { func containerFromRecord(client *Client, c containers.Container) *container { return &container{ - client: client, - id: c.ID, + client: client, + id: c.ID, + metadata: c, } } var _ = (Container)(&container{}) type container struct { - client *Client - id string + client *Client + id string + metadata containers.Container } // ID returns the container's unique id @@ -97,8 +100,22 @@ func (c *container) ID() string { return c.id } -func (c *container) Info(ctx context.Context) (containers.Container, error) { - return c.get(ctx) +func (c *container) Info(ctx context.Context, opts ...InfoOpts) (containers.Container, error) { + i := &InfoConfig{ + // default to refreshing the 
container's local metadata + Refresh: true, + } + for _, o := range opts { + o(i) + } + if i.Refresh { + metadata, err := c.get(ctx) + if err != nil { + return c.metadata, err + } + c.metadata = metadata + } + return c.metadata, nil } func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) { @@ -217,7 +234,11 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N } // get the rootfs from the snapshotter and add it to the request - mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey) + s, err := c.client.getSnapshotter(ctx, r.Snapshotter) + if err != nil { + return nil, err + } + mounts, err := s.Mounts(ctx, r.SnapshotKey) if err != nil { return nil, err } @@ -362,7 +383,9 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er return nil, err } var i cio.IO - if ioAttach != nil { + if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown { + // Do not attach IO for task in unknown state, because there + // are no fifo paths anyway. if i, err = attachExistingIO(response, ioAttach); err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index 1ce989432634b..8954840235d47 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -20,11 +20,8 @@ import ( "context" "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" - "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" @@ -41,6 +38,15 @@ type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Co // UpdateContainerOpts allows the caller to set additional options when updating a container type UpdateContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error +// InfoOpts controls how container metadata is fetched and returned +type InfoOpts func(*InfoConfig) + +// InfoConfig specifies how container metadata is fetched +type InfoConfig struct { + // Refresh will to a fetch of the latest container metadata + Refresh bool +} + // WithRuntime allows a user to specify the runtime name and additional options that should // be used to create tasks for the container func WithRuntime(name string, options interface{}) NewContainerOpts { @@ -71,6 +77,14 @@ func WithImage(i Image) NewContainerOpts { } } +// WithImageName allows setting the image name as the base for the container +func WithImageName(n string) NewContainerOpts { + return func(ctx context.Context, _ *Client, c *containers.Container) error { + c.Image = n + return nil + } +} + // WithContainerLabels adds the provided labels to the container func WithContainerLabels(labels map[string]string) NewContainerOpts { return func(_ context.Context, _ *Client, c *containers.Container) error { @@ -109,9 +123,17 @@ func WithSnapshotter(name string) NewContainerOpts { // WithSnapshot uses an existing root filesystem for the container func WithSnapshot(id string) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - setSnapshotterIfEmpty(ctx, client, c) // check that the snapshot exists, if 
not, fail on creation - if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil { + var err error + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.Mounts(ctx, id); err != nil { return err } c.SnapshotKey = id @@ -123,13 +145,21 @@ func WithSnapshot(id string) NewContainerOpts { // root filesystem in read-write mode func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.RootFS(ctx) if err != nil { return err } - setSnapshotterIfEmpty(ctx, client, c) + parent := identity.ChainID(diffIDs).String() - if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil { + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.Prepare(ctx, id, parent, opts...); err != nil { return err } c.SnapshotKey = id @@ -144,7 +174,13 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta if c.Snapshotter == "" { return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot") } - return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey) + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if err := s.Remove(ctx, c.SnapshotKey); err != nil && !errdefs.IsNotFound(err) { + return err + } } return nil } @@ -153,13 +189,21 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta // root filesystem in read-only mode func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } - setSnapshotterIfEmpty(ctx, client, c) + parent := identity.ChainID(diffIDs).String() - if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil { + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.View(ctx, id, parent, opts...); err != nil { return err } c.SnapshotKey = id @@ -168,21 +212,6 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer } } -func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) { - if c.Snapshotter == "" { - defaultSnapshotter := DefaultSnapshotter - namespaceService := client.NamespaceService() - if ns, err := namespaces.NamespaceRequired(ctx); err == nil { - if labels, err := namespaceService.Labels(ctx, ns); err == nil { - if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok { - defaultSnapshotter = snapshotLabel - } - } - } - c.Snapshotter = defaultSnapshotter - } -} - // WithContainerExtension appends extension data to the container object. 
// Use this to decorate the container object with additional data for the client // integration. @@ -235,3 +264,8 @@ func WithSpec(s *oci.Spec, opts ...oci.SpecOpts) NewContainerOpts { return err } } + +// WithoutRefreshedMetadata will use the current metadata attached to the container object +func WithoutRefreshedMetadata(i *InfoConfig) { + i.Refresh = false +} diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index 340a9185728c2..b109a10ecb4bb 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/platforms" "github.com/opencontainers/image-spec/identity" ) @@ -45,18 +44,23 @@ func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerO func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } - setSnapshotterIfEmpty(ctx, client, c) - var ( - snapshotter = client.SnapshotService(c.Snapshotter) - parent = identity.ChainID(diffIDs).String() - usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) + parent = identity.ChainID(diffIDs).String() + usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) ) + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + snapshotter, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } if _, err := snapshotter.Stat(ctx, usernsID); err == nil { if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil { c.SnapshotKey = id diff --git a/vendor/github.com/containerd/containerd/container_restore_opts.go b/vendor/github.com/containerd/containerd/container_restore_opts.go index 4f251c4a6b385..03722dba1a544 100644 --- a/vendor/github.com/containerd/containerd/container_restore_opts.go +++ b/vendor/github.com/containerd/containerd/container_restore_opts.go @@ -22,7 +22,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" "github.com/gogo/protobuf/proto" ptypes "github.com/gogo/protobuf/types" "github.com/opencontainers/image-spec/identity" @@ -58,7 +57,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint return err } - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go index c7ad2bfaa2470..7174bbd6aa6fb 100644 --- a/vendor/github.com/containerd/containerd/containers/containers.go +++ b/vendor/github.com/containerd/containerd/containers/containers.go @@ -49,7 +49,7 @@ type Container struct { // This property is required and immutable. 
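A short sketch of the new Info options against a loaded container; the container ID is a placeholder:

package demo

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
)

func printImage(ctx context.Context, client *containerd.Client, id string) error {
	ctr, err := client.LoadContainer(ctx, id)
	if err != nil {
		return err
	}
	// By default Info refreshes the record from the metadata store...
	fresh, err := ctr.Info(ctx)
	if err != nil {
		return err
	}
	fmt.Println("image:", fresh.Image)
	// ...while WithoutRefreshedMetadata reuses the copy cached on the client object.
	cached, err := ctr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return err
	}
	fmt.Println("cached image:", cached.Image)
	return nil
}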
Runtime RuntimeInfo - // Spec should carry the the runtime specification used to implement the + // Spec should carry the runtime specification used to implement the // container. // // This field is required but mutable. diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 3e231408d55ec..c1c2046186a82 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -55,7 +55,14 @@ func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ( p := make([]byte, ra.Size()) - _, err = ra.ReadAt(p, 0) + n, err := ra.ReadAt(p, 0) + if err == io.EOF { + if int64(n) != ra.Size() { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } return p, err } @@ -162,6 +169,28 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { return err } +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + return copyWithBuffer(cw, r) +} + // seekReader attempts to seek the reader to the given offset, either by // resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding // up to the given offset. diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go index 5503cb56f91ad..efc58ea79ec2c 100644 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -35,7 +35,6 @@ import ( "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" - "github.com/containerd/continuity" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -661,6 +660,19 @@ func writeTimestampFile(p string, t time.Time) error { if err != nil { return err } + return atomicWrite(p, b, 0666) +} - return continuity.AtomicWriteFile(p, b, 0666) +func atomicWrite(path string, data []byte, mode os.FileMode) error { + tmp := fmt.Sprintf("%s.tmp", path) + f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) + if err != nil { + return errors.Wrap(err, "create tmp file") + } + _, err = f.Write(data) + f.Close() + if err != nil { + return errors.Wrap(err, "write atomic data") + } + return os.Rename(tmp, path) } diff --git a/vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go b/vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go index a48594b000992..6a351771ffc38 100644 --- a/vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go +++ b/vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go @@ -52,7 +52,7 @@ const ( Display Capability = "display" ) -// AllCaps returns the complete list of supported Nvidia capabilties. +// AllCaps returns the complete list of supported Nvidia capabilities. 
func AllCaps() []Capability { return []Capability{ Compute, diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go index 275a4c3e6c74b..b7cf1765d7561 100644 --- a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go index 011139d169990..af40395de04e2 100644 --- a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go @@ -20,7 +20,8 @@ package seccomp import ( "runtime" - "syscall" + + "golang.org/x/sys/unix" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -311,6 +312,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "sigaltstack", "signalfd", "signalfd4", + "sigprocmask", "sigreturn", "socket", "socketcall", @@ -555,7 +557,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { Args: []specs.LinuxSeccompArg{ { Index: 1, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: specs.OpMaskedEqual, }, @@ -570,7 +572,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { Args: []specs.LinuxSeccompArg{ { Index: 0, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: specs.OpMaskedEqual, }, diff --git a/vendor/github.com/containerd/continuity/proto/gen.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go similarity index 69% rename from vendor/github.com/containerd/continuity/proto/gen.go rename to vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go index 63ce10fb53120..14d7b75e18e6a 100644 --- a/vendor/github.com/containerd/continuity/proto/gen.go +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux + /* Copyright The containerd Authors. @@ -14,6 +16,11 @@ limitations under the License. */ -package proto +package seccomp + +import specs "github.com/opencontainers/runtime-spec/specs-go" -//go:generate protoc --go_out=. manifest.proto +// DefaultProfile defines the whitelist for the default seccomp profile. 
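On Linux the generated profile can be attached through a plain oci.SpecOpts when building the container spec; this is only a sketch, and the container and snapshot IDs are placeholders:

package demo

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/contrib/seccomp"
	"github.com/containerd/containerd/oci"
)

// withDefaultSeccomp fills in spec.Linux.Seccomp from the default whitelist.
func withDefaultSeccomp(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
	if s.Linux != nil {
		s.Linux.Seccomp = seccomp.DefaultProfile(s)
	}
	return nil
}

func newConfinedContainer(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	return client.NewContainer(ctx, "demo",
		containerd.WithNewSnapshot("demo-snapshot", image),
		containerd.WithNewSpec(oci.WithImageConfig(image), withDefaultSeccomp),
	)
}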
+func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { + return &specs.LinuxSeccomp{} +} diff --git a/vendor/github.com/containerd/containerd/defaults/defaults.go b/vendor/github.com/containerd/containerd/defaults/defaults.go index 3a748e4e80e20..6f5b122ecf936 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults.go @@ -23,10 +23,10 @@ const ( // DefaultMaxSendMsgSize defines the default maximum message size for // sending protobufs passed over the GRPC API. DefaultMaxSendMsgSize = 16 << 20 - // DefaultRuntimeNSLabel defines the namespace label to check for + // DefaultRuntimeNSLabel defines the namespace label to check for the // default runtime DefaultRuntimeNSLabel = "containerd.io/defaults/runtime" - // DefaultSnapshotterNSLabel defines the namespances label to check for + // DefaultSnapshotterNSLabel defines the namespace label to check for the // default snapshotter DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 30ed42235ef0c..319e8777bf869 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -32,4 +32,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. DefaultFIFODir = "/run/containerd/fifo" + // DefaultRuntime is the default linux runtime + DefaultRuntime = "io.containerd.runc.v2" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index 16f1048ca6ff3..5eede8de83b81 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -40,4 +40,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. Unused on Windows. 
DefaultFIFODir = "" + // DefaultRuntime is the default windows runtime + DefaultRuntime = "io.containerd.runhcs.v1" ) diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index 4d890ce2b969a..445df019220c8 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -45,10 +45,17 @@ type diffRemote struct { client diffapi.DiffClient } -func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) { +func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { + var config diff.ApplyConfig + for _, opt := range opts { + if err := opt(ctx, desc, &config); err != nil { + return ocispec.Descriptor{}, err + } + } req := &diffapi.ApplyRequest{ - Diff: fromDescriptor(diff), - Mounts: fromMounts(mounts), + Diff: fromDescriptor(desc), + Mounts: fromMounts(mounts), + Payloads: config.ProcessorPayloads, } resp, err := r.client.Apply(ctx, req) if err != nil { diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go index 2b6f01c74efb7..17aab616e543d 100644 --- a/vendor/github.com/containerd/containerd/diff/diff.go +++ b/vendor/github.com/containerd/containerd/diff/diff.go @@ -20,6 +20,7 @@ import ( "context" "github.com/containerd/containerd/mount" + "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -51,6 +52,15 @@ type Comparer interface { Compare(ctx context.Context, lower, upper []mount.Mount, opts ...Opt) (ocispec.Descriptor, error) } +// ApplyConfig is used to hold parameters needed for a apply operation +type ApplyConfig struct { + // ProcessorPayloads specifies the payload sent to various processors + ProcessorPayloads map[string]*types.Any +} + +// ApplyOpt is used to configure an Apply operation +type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error + // Applier allows applying diffs between mounts type Applier interface { // Apply applies the content referred to by the given descriptor to @@ -58,7 +68,7 @@ type Applier interface { // implementation and content descriptor. For example, in the common // case the descriptor is a file system difference in tar format, // that tar would be applied on top of the mounts. - Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount) (ocispec.Descriptor, error) + Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error) } // WithMediaType sets the media type to use for creating the diff, without @@ -87,3 +97,11 @@ func WithLabels(labels map[string]string) Opt { return nil } } + +// WithPayloads sets the apply processor payloads to the config +func WithPayloads(payloads map[string]*types.Any) ApplyOpt { + return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error { + c.ProcessorPayloads = payloads + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/diff/stream.go b/vendor/github.com/containerd/containerd/diff/stream.go new file mode 100644 index 0000000000000..1b625feaa7ae6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream.go @@ -0,0 +1,187 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "context" + "io" + "os" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/images" + "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + handlers []Handler + + // ErrNoProcessor is returned when no stream processor is available for a media-type + ErrNoProcessor = errors.New("no processor for media-type") +) + +func init() { + // register the default compression handler + RegisterProcessor(compressedHandler) +} + +// RegisterProcessor registers a stream processor for media-types +func RegisterProcessor(handler Handler) { + handlers = append(handlers, handler) +} + +// GetProcessor returns the processor for a media-type +func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + // reverse this list so that user configured handlers come up first + for i := len(handlers) - 1; i >= 0; i-- { + processor, ok := handlers[i](ctx, stream.MediaType()) + if ok { + return processor(ctx, stream, payloads) + } + } + return nil, ErrNoProcessor +} + +// Handler checks a media-type and initializes the processor +type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) + +// StaticHandler returns the processor init func for a static media-type +func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler { + return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + if mediaType == expectedMediaType { + return fn, true + } + return nil, false + } +} + +// StreamProcessorInit returns the initialized stream processor +type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) + +// RawProcessor provides access to direct fd for processing +type RawProcessor interface { + // File returns the fd for the read stream of the underlying processor + File() *os.File +} + +// StreamProcessor handles processing a content stream and transforming it into a different media-type +type StreamProcessor interface { + io.ReadCloser + + // MediaType is the resulting media-type that the processor processes the stream into + MediaType() string +} + +func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + compressed, err := images.DiffCompression(ctx, mediaType) + if err != nil { + return nil, false + } + if compressed != "" { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + ds, err := compression.DecompressStream(stream) + if err != nil { + return nil, err + } + + return &compressedProcessor{ + rc: ds, + }, nil + }, true + } + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return &stdProcessor{ + rc: stream, + }, nil + }, true +} + +// 
NewProcessorChain initialized the root StreamProcessor +func NewProcessorChain(mt string, r io.Reader) StreamProcessor { + return &processorChain{ + mt: mt, + rc: r, + } +} + +type processorChain struct { + mt string + rc io.Reader +} + +func (c *processorChain) MediaType() string { + return c.mt +} + +func (c *processorChain) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *processorChain) Close() error { + return nil +} + +type stdProcessor struct { + rc StreamProcessor +} + +func (c *stdProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *stdProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *stdProcessor) Close() error { + return nil +} + +type compressedProcessor struct { + rc io.ReadCloser +} + +func (c *compressedProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *compressedProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *compressedProcessor) Close() error { + return c.rc.Close() +} + +func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler { + set := make(map[string]struct{}, len(mediaTypes)) + for _, m := range mediaTypes { + set[m] = struct{}{} + } + return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) { + if _, ok := set[mediaType]; ok { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + payload := payloads[id] + return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload) + }, true + } + return nil, false + } +} + +const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE" diff --git a/vendor/github.com/containerd/containerd/diff/stream_unix.go b/vendor/github.com/containerd/containerd/diff/stream_unix.go new file mode 100644 index 0000000000000..28f38d998a806 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_unix.go @@ -0,0 +1,146 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + + var payloadC io.Closer + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + go func() { + io.Copy(w, bytes.NewReader(data)) + w.Close() + }() + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + payloadC = r + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + if payloadC != nil { + payloadC.Close() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} diff --git a/vendor/github.com/containerd/containerd/diff/stream_windows.go b/vendor/github.com/containerd/containerd/diff/stream_windows.go new file mode 100644 index 0000000000000..8dadd72c92c29 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_windows.go @@ -0,0 +1,165 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + winio "github.com/Microsoft/go-winio" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const processorPipe = "STREAM_PROCESSOR_PIPE" + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + up, err := getUiqPath() + if err != nil { + return nil, err + } + path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up) + l, err := winio.ListenPipe(path, nil) + if err != nil { + return nil, err + } + go func() { + defer l.Close() + conn, err := l.Accept() + if err != nil { + logrus.WithError(err).Error("accept npipe connection") + return + } + io.Copy(conn, bytes.NewReader(data)) + conn.Close() + }() + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path)) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} + +func getUiqPath() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + os.Remove(dir) + return filepath.Base(dir), nil +} diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index 40427fc5a54e7..b5200afc0eee6 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -26,7 +26,11 @@ // client-side errors to the correct types. package errdefs -import "github.com/pkg/errors" +import ( + "context" + + "github.com/pkg/errors" +) // Definitions of common error types used throughout containerd. All containerd // errors returned by most packages will map into one of these errors classes. @@ -76,3 +80,14 @@ func IsUnavailable(err error) bool { func IsNotImplemented(err error) bool { return errors.Cause(err) == ErrNotImplemented } + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Cause(err) == context.Canceled +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. 
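These predicates pair with the canceled and deadline cases added to the gRPC conversion in this hunk; a small sketch of classifying an error returned by a containerd API call:

package demo

import (
	"github.com/containerd/containerd/errdefs"
)

// classify maps a gRPC transport error back to a client-side category.
func classify(grpcErr error) string {
	err := errdefs.FromGRPC(grpcErr)
	switch {
	case errdefs.IsCanceled(err):
		return "canceled by the caller"
	case errdefs.IsDeadlineExceeded(err):
		return "deadline exceeded"
	case errdefs.IsNotFound(err):
		return "not found"
	case errdefs.IsUnavailable(err):
		return "daemon unavailable"
	default:
		return "unknown"
	}
}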
+func IsDeadlineExceeded(err error) bool { + return errors.Cause(err) == context.DeadlineExceeded +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index b1542f13d6abf..209f63bd0fc0d 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -17,6 +17,7 @@ package errdefs import ( + "context" "strings" "github.com/pkg/errors" @@ -55,6 +56,10 @@ func ToGRPC(err error) error { return status.Errorf(codes.Unavailable, err.Error()) case IsNotImplemented(err): return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) } return err @@ -89,6 +94,10 @@ func FromGRPC(err error) error { cls = ErrFailedPrecondition case codes.Unimplemented: cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded default: cls = ErrUnknown } diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go index 39972d74b235d..59273c952f5a2 100644 --- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -50,7 +50,7 @@ var _ events.Publisher = &Exchange{} var _ events.Forwarder = &Exchange{} var _ events.Subscriber = &Exchange{} -// Forward accepts an envelope to be direcly distributed on the exchange. +// Forward accepts an envelope to be directly distributed on the exchange. // // This is useful when an event is forwarded on behalf of another namespace or // when the event is propagated on behalf of another publisher. diff --git a/vendor/github.com/containerd/containerd/export.go b/vendor/github.com/containerd/containerd/export.go index f5552231ee915..81f199226d4ae 100644 --- a/vendor/github.com/containerd/containerd/export.go +++ b/vendor/github.com/containerd/containerd/export.go @@ -20,26 +20,12 @@ import ( "context" "io" - "github.com/containerd/containerd/images/oci" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" + "github.com/containerd/containerd/images/archive" ) -// Export exports an image to a Tar stream. -// OCI format is used by default. -// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc. -// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream. -func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...oci.V1ExporterOpt) (io.ReadCloser, error) { - - exporter, err := oci.ResolveV1ExportOpt(opts...) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - pw.CloseWithError(errors.Wrap(exporter.Export(ctx, c.ContentStore(), desc, pw), "export failed")) - }() - return pr, nil +// Export exports images to a Tar stream. +// The tar archive is in OCI format with a Docker compatible manifest +// when a single target platform is given. +func (c *Client) Export(ctx context.Context, w io.Writer, opts ...archive.ExportOpt) error { + return archive.Export(ctx, c.ContentStore(), w, opts...) 
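A minimal sketch of driving the reworked export entry point from a client, assuming a connected *containerd.Client, the "default" namespace, and upstream import paths; the image reference and output path are placeholders:

import (
    "context"
    "os"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/images/archive"
    "github.com/containerd/containerd/namespaces"
    "github.com/containerd/containerd/platforms"
)

func exportImage(ctx context.Context, client *containerd.Client, ref, outPath string) error {
    ctx = namespaces.WithNamespace(ctx, "default")

    f, err := os.Create(outPath)
    if err != nil {
        return err
    }
    defer f.Close()

    // Writes an OCI layout tar; because a single platform is selected, a
    // Docker-compatible manifest.json is included as well.
    return client.Export(ctx, f,
        archive.WithImage(client.ImageService(), ref),
        archive.WithPlatform(platforms.Default()),
    )
}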
} diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go index 35a1712cb3e4b..c6fcf79103cd1 100644 --- a/vendor/github.com/containerd/containerd/gc/gc.go +++ b/vendor/github.com/containerd/containerd/gc/gc.go @@ -30,6 +30,11 @@ import ( // ResourceType represents type of resource at a node type ResourceType uint8 +// ResourceMax represents the max resource. +// Upper bits are stripped out during the mark phase, allowing the upper 3 bits +// to be used by the caller reference function. +const ResourceMax = ResourceType(0x1F) + // Node presents a resource which has a type and key, // this node can be used to lookup other nodes. type Node struct { @@ -80,6 +85,8 @@ func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struc } } + // strip bits above max resource type + id.Type = id.Type & ResourceMax // mark as black when done reachable[id] = struct{}{} } diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 14bfea91b9e1a..9ef09ac2f8829 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -19,16 +19,21 @@ package containerd import ( "context" "fmt" + "strings" + "sync/atomic" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" - digest "github.com/opencontainers/go-digest" + "github.com/containerd/containerd/snapshots" + "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "golang.org/x/sync/semaphore" ) // Image describes an image used by containers @@ -40,11 +45,13 @@ type Image interface { // Labels of the image Labels() map[string]string // Unpack unpacks the image's content into a snapshot - Unpack(context.Context, string) error + Unpack(context.Context, string, ...UnpackOpt) error // RootFS returns the unpacked diffids that make up images rootfs. RootFS(ctx context.Context) ([]digest.Digest, error) // Size returns the total size of the image's packed resources. Size(ctx context.Context) (int64, error) + // Usage returns a usage calculation for the image. + Usage(context.Context, ...UsageOpt) (int64, error) // Config descriptor for the image. Config(ctx context.Context) (ocispec.Descriptor, error) // IsUnpacked returns whether or not an image is unpacked. @@ -53,6 +60,49 @@ type Image interface { ContentStore() content.Store } +type usageOptions struct { + manifestLimit *int + manifestOnly bool + snapshots bool +} + +// UsageOpt is used to configure the usage calculation +type UsageOpt func(*usageOptions) error + +// WithUsageManifestLimit sets the limit to the number of manifests which will +// be walked for usage. Setting this value to 0 will require all manifests to +// be walked, returning ErrNotFound if manifests are missing. +// NOTE: By default all manifests which exist will be walked +// and any non-existent manifests and their subobjects will be ignored. 
+func WithUsageManifestLimit(i int) UsageOpt { + // If 0 then don't filter any manifests + // By default limits to current platform + return func(o *usageOptions) error { + o.manifestLimit = &i + return nil + } +} + +// WithSnapshotUsage will check for referenced snapshots from the image objects +// and include the snapshot size in the total usage. +func WithSnapshotUsage() UsageOpt { + return func(o *usageOptions) error { + o.snapshots = true + return nil + } +} + +// WithManifestUsage is used to get the usage for an image based on what is +// reported by the manifests rather than what exists in the content store. +// NOTE: This function is best used with the manifest limit set to get a +// consistent value, otherwise non-existent manifests will be excluded. +func WithManifestUsage() UsageOpt { + return func(o *usageOptions) error { + o.manifestOnly = true + return nil + } +} + var _ = (Image)(&image{}) // NewImage returns a client image object from the metadata image @@ -60,7 +110,7 @@ func NewImage(client *Client, i images.Image) Image { return &image{ client: client, i: i, - platform: platforms.Default(), + platform: client.platform, } } @@ -98,8 +148,95 @@ func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) { } func (i *image) Size(ctx context.Context) (int64, error) { - provider := i.client.ContentStore() - return i.i.Size(ctx, provider, i.platform) + return i.Usage(ctx, WithUsageManifestLimit(1), WithManifestUsage()) +} + +func (i *image) Usage(ctx context.Context, opts ...UsageOpt) (int64, error) { + var config usageOptions + for _, opt := range opts { + if err := opt(&config); err != nil { + return 0, err + } + } + + var ( + provider = i.client.ContentStore() + handler = images.ChildrenHandler(provider) + size int64 + mustExist bool + ) + + if config.manifestLimit != nil { + handler = images.LimitManifests(handler, i.platform, *config.manifestLimit) + mustExist = true + } + + var wh images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var usage int64 + children, err := handler(ctx, desc) + if err != nil { + if !errdefs.IsNotFound(err) || mustExist { + return nil, err + } + if !config.manifestOnly { + // Do not count size of non-existent objects + desc.Size = 0 + } + } else if config.snapshots || !config.manifestOnly { + info, err := provider.Info(ctx, desc.Digest) + if err != nil { + if !errdefs.IsNotFound(err) { + return nil, err + } + if !config.manifestOnly { + // Do not count size of non-existent objects + desc.Size = 0 + } + } else if info.Size > desc.Size { + // Count actual usage, Size may be unset or -1 + desc.Size = info.Size + } + + for k, v := range info.Labels { + const prefix = "containerd.io/gc.ref.snapshot." + if !strings.HasPrefix(k, prefix) { + continue + } + + sn := i.client.SnapshotService(k[len(prefix):]) + if sn == nil { + continue + } + + u, err := sn.Usage(ctx, v) + if err != nil { + if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) { + return nil, err + } + } else { + usage += u.Size + } + } + } + + // Ignore unknown sizes. Generally unknown sizes should + // never be set in manifests, however, the usage + // calculation does not need to enforce this. 
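A minimal sketch of the new Usage API, assuming a connected *containerd.Client and an image already present in the store; the two calls contrast manifest-reported size with actual content-store and snapshot usage:

import (
    "context"

    "github.com/containerd/containerd"
)

func imageUsage(ctx context.Context, client *containerd.Client, ref string) (manifestSize, storeUsage int64, err error) {
    img, err := client.GetImage(ctx, ref)
    if err != nil {
        return 0, 0, err
    }

    // Size as reported by the manifests of the best-matching platform,
    // i.e. what Image.Size now returns.
    manifestSize, err = img.Usage(ctx,
        containerd.WithUsageManifestLimit(1),
        containerd.WithManifestUsage(),
    )
    if err != nil {
        return 0, 0, err
    }

    // Bytes actually present in the content store plus referenced snapshots.
    storeUsage, err = img.Usage(ctx, containerd.WithSnapshotUsage())
    return manifestSize, storeUsage, err
}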
+ if desc.Size >= 0 { + usage += desc.Size + } + + atomic.AddInt64(&size, usage) + + return children, nil + } + + l := semaphore.NewWeighted(3) + if err := images.Dispatch(ctx, wh, l, i.i.Target); err != nil { + return 0, err + } + + return size, nil } func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) { @@ -108,7 +245,10 @@ func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) { } func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, error) { - sn := i.client.SnapshotService(snapshotterName) + sn, err := i.client.getSnapshotter(ctx, snapshotterName) + if err != nil { + return false, err + } cs := i.client.ContentStore() diffs, err := i.i.RootFS(ctx, cs, i.platform) @@ -127,28 +267,53 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e return false, nil } -func (i *image) Unpack(ctx context.Context, snapshotterName string) error { +// UnpackConfig provides configuration for the unpack of an image +type UnpackConfig struct { + // ApplyOpts for applying a diff to a snapshotter + ApplyOpts []diff.ApplyOpt + // SnapshotOpts for configuring a snapshotter + SnapshotOpts []snapshots.Opt +} + +// UnpackOpt provides configuration for unpack +type UnpackOpt func(context.Context, *UnpackConfig) error + +func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error { ctx, done, err := i.client.WithLease(ctx) if err != nil { return err } defer done(ctx) + var config UnpackConfig + for _, o := range opts { + if err := o(ctx, &config); err != nil { + return err + } + } + layers, err := i.getLayers(ctx, i.platform) if err != nil { return err } var ( - sn = i.client.SnapshotService(snapshotterName) a = i.client.DiffService() cs = i.client.ContentStore() chain []digest.Digest unpacked bool ) + snapshotterName, err = i.client.resolveSnapshotterName(ctx, snapshotterName) + if err != nil { + return err + } + sn, err := i.client.getSnapshotter(ctx, snapshotterName) + if err != nil { + return err + } for _, layer := range layers { - unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a) + unpacked, err = rootfs.ApplyLayerWithOpts(ctx, layer, chain, sn, a, config.SnapshotOpts, config.ApplyOpts) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go b/vendor/github.com/containerd/containerd/images/annotations.go similarity index 73% rename from vendor/github.com/containerd/containerd/archive/tar_opts_unix.go rename to vendor/github.com/containerd/containerd/images/annotations.go index 173826967945b..47d92104cddc6 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go +++ b/vendor/github.com/containerd/containerd/images/annotations.go @@ -1,5 +1,3 @@ -// +build !windows - /* Copyright The containerd Authors. @@ -16,9 +14,10 @@ limitations under the License. 
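A minimal sketch of passing an UnpackOpt, assuming an already pulled containerd.Image and a snapshotter name supplied by the caller; the label key is purely illustrative:

import (
    "context"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/snapshots"
)

func unpackWithLabel(ctx context.Context, img containerd.Image, snapshotter string) error {
    // An UnpackOpt may append snapshot and diff-apply options; here every
    // snapshot created during unpack gets an illustrative label.
    labelOpt := func(ctx context.Context, c *containerd.UnpackConfig) error {
        c.SnapshotOpts = append(c.SnapshotOpts, func(info *snapshots.Info) error {
            if info.Labels == nil {
                info.Labels = map[string]string{}
            }
            info.Labels["example.com/unpacked-by"] = "my-tool"
            return nil
        })
        return nil
    }
    return img.Unpack(ctx, snapshotter, labelOpt)
}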
*/ -package archive +package images -// ApplyOptions provides additional options for an Apply operation -type ApplyOptions struct { - Filter Filter // Filter tar headers -} +const ( + // AnnotationImageName is an annotation on a Descriptor in an index.json + // containing the `Name` value as used by an `Image` struct + AnnotationImageName = "io.containerd.image.name" +) diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go new file mode 100644 index 0000000000000..244ef322450a8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -0,0 +1,468 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "archive/tar" + "context" + "encoding/json" + "io" + "path" + "sort" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type exportOptions struct { + manifests []ocispec.Descriptor + platform platforms.MatchComparer + allPlatforms bool + skipDockerManifest bool +} + +// ExportOpt defines options for configuring exported descriptors +type ExportOpt func(context.Context, *exportOptions) error + +// WithPlatform defines the platform to require manifest lists have +// not exporting all platforms. +// Additionally, platform is used to resolve image configs for +// Docker v1.1, v1.2 format compatibility. +func WithPlatform(p platforms.MatchComparer) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + o.platform = p + return nil + } +} + +// WithAllPlatforms exports all manifests from a manifest list. +// Missing content will fail the export. +func WithAllPlatforms() ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + o.allPlatforms = true + return nil + } +} + +// WithSkipDockerManifest skips creation of the Docker compatible +// manifest.json file. +func WithSkipDockerManifest() ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + o.skipDockerManifest = true + return nil + } +} + +// WithImage adds the provided images to the exported archive. +func WithImage(is images.Store, name string) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + img, err := is.Get(ctx, name) + if err != nil { + return err + } + + img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations) + o.manifests = append(o.manifests, img.Target) + + return nil + } +} + +// WithManifest adds a manifest to the exported archive. +// When names are given they will be set on the manifest in the +// exported archive, creating an index record for each name. 
+// When no names are provided, it is up to caller to put name annotation to +// on the manifest descriptor if needed. +func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + if len(names) == 0 { + o.manifests = append(o.manifests, manifest) + } + for _, name := range names { + mc := manifest + mc.Annotations = addNameAnnotation(name, manifest.Annotations) + o.manifests = append(o.manifests, mc) + } + + return nil + } +} + +func addNameAnnotation(name string, base map[string]string) map[string]string { + annotations := map[string]string{} + for k, v := range base { + annotations[k] = v + } + + annotations[images.AnnotationImageName] = name + annotations[ocispec.AnnotationRefName] = ociReferenceName(name) + + return annotations +} + +// Export implements Exporter. +func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error { + var eo exportOptions + for _, opt := range opts { + if err := opt(ctx, &eo); err != nil { + return err + } + } + + records := []tarRecord{ + ociLayoutFile(""), + ociIndexRecord(eo.manifests), + } + + algorithms := map[string]struct{}{} + dManifests := map[digest.Digest]*exportManifest{} + resolvedIndex := map[digest.Digest]digest.Digest{} + for _, desc := range eo.manifests { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + mt, ok := dManifests[desc.Digest] + if !ok { + // TODO(containerd): Skip if already added + r, err := getRecords(ctx, store, desc, algorithms) + if err != nil { + return err + } + records = append(records, r...) + + mt = &exportManifest{ + manifest: desc, + } + dManifests[desc.Digest] = mt + } + + name := desc.Annotations[images.AnnotationImageName] + if name != "" && !eo.skipDockerManifest { + mt.names = append(mt.names, name) + } + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + d, ok := resolvedIndex[desc.Digest] + if !ok { + records = append(records, blobRecord(store, desc)) + + p, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return err + } + + var manifests []ocispec.Descriptor + for _, m := range index.Manifests { + if eo.platform != nil { + if m.Platform == nil || eo.platform.Match(*m.Platform) { + manifests = append(manifests, m) + } else if !eo.allPlatforms { + continue + } + } + + r, err := getRecords(ctx, store, m, algorithms) + if err != nil { + return err + } + + records = append(records, r...) 
+ } + + if !eo.skipDockerManifest { + if len(manifests) >= 1 { + if len(manifests) > 1 { + sort.SliceStable(manifests, func(i, j int) bool { + if manifests[i].Platform == nil { + return false + } + if manifests[j].Platform == nil { + return true + } + return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) + }) + } + d = manifests[0].Digest + dManifests[d] = &exportManifest{ + manifest: manifests[0], + } + } else if eo.platform != nil { + return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform") + } + } + resolvedIndex[desc.Digest] = d + } + if d != "" { + if name := desc.Annotations[images.AnnotationImageName]; name != "" { + mt := dManifests[d] + mt.names = append(mt.names, name) + } + + } + default: + return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported") + } + } + + if len(dManifests) > 0 { + tr, err := manifestsRecord(ctx, store, dManifests) + if err != nil { + return errors.Wrap(err, "unable to create manifests file") + } + + records = append(records, tr) + } + + if len(algorithms) > 0 { + records = append(records, directoryRecord("blobs/", 0755)) + for alg := range algorithms { + records = append(records, directoryRecord("blobs/"+alg+"/", 0755)) + } + } + + tw := tar.NewWriter(writer) + defer tw.Close() + return writeTar(ctx, tw, records) +} + +func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}) ([]tarRecord, error) { + var records []tarRecord + exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + records = append(records, blobRecord(store, desc)) + algorithms[desc.Digest.Algorithm().String()] = struct{}{} + return nil, nil + } + + childrenHandler := images.ChildrenHandler(store) + + handlers := images.Handlers( + childrenHandler, + images.HandlerFunc(exportHandler), + ) + + // Walk sequentially since the number of fetchs is likely one and doing in + // parallel requires locking the export handler + if err := images.Walk(ctx, handlers, desc); err != nil { + return nil, err + } + + return records, nil +} + +type tarRecord struct { + Header *tar.Header + CopyTo func(context.Context, io.Writer) (int64, error) +} + +func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord { + path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + return tarRecord{ + Header: &tar.Header{ + Name: path, + Mode: 0444, + Size: desc.Size, + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + r, err := cs.ReaderAt(ctx, desc) + if err != nil { + return 0, errors.Wrap(err, "failed to get reader") + } + defer r.Close() + + // Verify digest + dgstr := desc.Digest.Algorithm().Digester() + + n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r)) + if err != nil { + return 0, errors.Wrap(err, "failed to copy to tar") + } + if dgstr.Digest() != desc.Digest { + return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest()) + } + return n, nil + }, + } +} + +func directoryRecord(name string, mode int64) tarRecord { + return tarRecord{ + Header: &tar.Header{ + Name: name, + Mode: mode, + Typeflag: tar.TypeDir, + }, + } +} + +func ociLayoutFile(version string) tarRecord { + if version == "" { + version = ocispec.ImageLayoutVersion + } + layout := ocispec.ImageLayout{ + Version: version, + } + + b, err := json.Marshal(layout) + if err != nil { + panic(err) + } + + return tarRecord{ + Header: &tar.Header{ + Name: ocispec.ImageLayoutFile, + 
Mode: 0444, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } + +} + +func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord { + index := ocispec.Index{ + Versioned: ocispecs.Versioned{ + SchemaVersion: 2, + }, + Manifests: manifests, + } + + b, err := json.Marshal(index) + if err != nil { + panic(err) + } + + return tarRecord{ + Header: &tar.Header{ + Name: "index.json", + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } +} + +type exportManifest struct { + manifest ocispec.Descriptor + names []string +} + +func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) { + mfsts := make([]struct { + Config string + RepoTags []string + Layers []string + }, len(manifests)) + + var i int + for _, m := range manifests { + p, err := content.ReadBlob(ctx, store, m.manifest) + if err != nil { + return tarRecord{}, err + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return tarRecord{}, err + } + if err := manifest.Config.Digest.Validate(); err != nil { + return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest) + } + + dgst := manifest.Config.Digest + mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded()) + for _, l := range manifest.Layers { + path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded()) + mfsts[i].Layers = append(mfsts[i].Layers, path) + } + + for _, name := range m.names { + nname, err := familiarizeReference(name) + if err != nil { + return tarRecord{}, err + } + + mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname) + } + + i++ + } + + b, err := json.Marshal(mfsts) + if err != nil { + return tarRecord{}, err + } + + return tarRecord{ + Header: &tar.Header{ + Name: "manifest.json", + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + }, nil +} + +func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error { + sort.Slice(records, func(i, j int) bool { + return records[i].Header.Name < records[j].Header.Name + }) + + var last string + for _, record := range records { + if record.Header.Name == last { + continue + } + last = record.Header.Name + if err := tw.WriteHeader(record.Header); err != nil { + return err + } + if record.CopyTo != nil { + n, err := record.CopyTo(ctx, tw) + if err != nil { + return err + } + if n != record.Header.Size { + return errors.Errorf("unexpected copy size for %s", record.Header.Name) + } + } else if record.Header.Size > 0 { + return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go index 692c76b1ff010..5bc887130f57b 100644 --- a/vendor/github.com/containerd/containerd/images/archive/importer.go +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -22,12 +22,14 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" "path" "github.com/containerd/containerd/archive/compression" 
"github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" digest "github.com/opencontainers/go-digest" @@ -36,6 +38,22 @@ import ( "github.com/pkg/errors" ) +type importOpts struct { + compress bool +} + +// ImportOpt is an option for importing an OCI index +type ImportOpt func(*importOpts) error + +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(io *importOpts) error { + io.compress = true + return nil + } +} + // ImportIndex imports an index from a tar archive image bundle // - implements Docker v1.1, v1.2 and OCI v1. // - prefers OCI v1 when provided @@ -43,8 +61,7 @@ import ( // - normalizes Docker references and adds as OCI ref name // e.g. alpine:latest -> docker.io/library/alpine:latest // - existing OCI reference names are untouched -// - TODO: support option to compress layers on ingest -func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) { +func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { var ( tr = tar.NewReader(reader) @@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } symlinks = make(map[string]string) blobs = make(map[string]ocispec.Descriptor) + iopts importOpts ) + + for _, o := range opts { + if err := o(&iopts); err != nil { + return ocispec.Descriptor{}, err + } + } + for { hdr, err := tr.Next() if err == io.EOF { @@ -99,7 +124,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } // If OCI layout was given, interpret the tar as an OCI layout. - // When not provided, the layout of the tar will be interpretted + // When not provided, the layout of the tar will be interpreted // as Docker v1.1 or v1.2. 
if ociLayout.Version != "" { if ociLayout.Version != ocispec.ImageLayoutVersion { @@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc if !ok { return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config) } - config.MediaType = ocispec.MediaTypeImageConfig + config.MediaType = images.MediaTypeDockerSchema2Config - layers, err := resolveLayers(ctx, store, mfst.Layers, blobs) + layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress) if err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers") } - manifest := ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: config, - Layers: layers, + manifest := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: config, + Layers: layers, } desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest) @@ -181,7 +210,8 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } mfstdesc.Annotations = map[string]string{ - ocispec.AnnotationRefName: normalized, + images.AnnotationImageName: normalized, + ocispec.AnnotationRefName: ociReferenceName(normalized), } idx.Manifests = append(idx.Manifests, mfstdesc) @@ -210,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size return dgstr.Digest(), nil } -func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var layers []ocispec.Descriptor - for _, f := range layerFiles { +func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) { + layers := make([]ocispec.Descriptor, len(layerFiles)) + descs := map[digest.Digest]*ocispec.Descriptor{} + filters := []string{} + for i, f := range layerFiles { desc, ok := blobs[f] if !ok { return nil, errors.Errorf("layer %q not found", f) } + layers[i] = desc + descs[desc.Digest] = &layers[i] + filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String()) + } + err := store.Walk(ctx, func(info content.Info) error { + dgst, ok := info.Labels["containerd.io/uncompressed"] + if ok { + desc := descs[digest.Digest(dgst)] + if desc != nil { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + desc.Digest = info.Digest + desc.Size = info.Size + } + } + return nil + }, filters...) 
+ if err != nil { + return nil, errors.Wrap(err, "failure checking for compressed blobs") + } + + for i, desc := range layers { + if desc.MediaType != "" { + continue + } // Open blob, resolve media type ra, err := store.ReaderAt(ctx, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest) + return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest) } s, err := compression.DecompressStream(content.NewReader(ra)) if err != nil { - return nil, errors.Wrapf(err, "failed to detect compression for %q", f) + return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i]) } if s.GetCompression() == compression.Uncompressed { - // TODO: Support compressing and writing back to content store - desc.MediaType = ocispec.MediaTypeImageLayer + if compress { + ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + labels := map[string]string{ + "containerd.io/uncompressed": desc.Digest.String(), + } + layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) + if err != nil { + s.Close() + return nil, err + } + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2Layer + } } else { - desc.MediaType = ocispec.MediaTypeImageLayerGzip + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip } s.Close() - layers = append(layers, desc) } return layers, nil } +func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) { + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer") + } + + defer func() { + w.Close() + if err != nil { + cs.Abort(ctx, ref) + } + }() + if err := w.Truncate(0); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer") + } + + cw, err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return ocispec.Descriptor{}, err + } + + if _, err := io.Copy(cw, r); err != nil { + return ocispec.Descriptor{}, err + } + if err := cw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + + cst, err := w.Status() + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status") + } + + desc.Digest = w.Digest() + desc.Size = cst.Offset + + if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit") + } + } + + return desc, nil +} + func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) { manifestBytes, err := json.Marshal(manifest) if err != nil { diff --git a/vendor/github.com/containerd/containerd/images/archive/reference.go b/vendor/github.com/containerd/containerd/images/archive/reference.go index 2e80a968a0aaa..cd63517e53563 100644 --- a/vendor/github.com/containerd/containerd/images/archive/reference.go +++ b/vendor/github.com/containerd/containerd/images/archive/reference.go @@ -19,7 +19,8 @@ package archive import ( "strings" - "github.com/docker/distribution/reference" + "github.com/containerd/containerd/reference" + distref "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -69,7 +70,7 @@ func isImagePrefix(s, prefix string) bool { func 
normalizeReference(ref string) (string, error) { // TODO: Replace this function to not depend on reference package - normalized, err := reference.ParseDockerRef(ref) + normalized, err := distref.ParseDockerRef(ref) if err != nil { return "", errors.Wrapf(err, "normalize image ref %q", ref) } @@ -77,6 +78,31 @@ func normalizeReference(ref string) (string, error) { return normalized.String(), nil } +func familiarizeReference(ref string) (string, error) { + named, err := distref.ParseNormalizedNamed(ref) + if err != nil { + return "", errors.Wrapf(err, "failed to parse %q", ref) + } + named = distref.TagNameOnly(named) + + return distref.FamiliarString(named), nil +} + +func ociReferenceName(name string) string { + // OCI defines the reference name as only a tag excluding the + // repository. The containerd annotation contains the full image name + // since the tag is insufficient for correctly naming and referring to an + // image + var ociRef string + if spec, err := reference.Parse(name); err == nil { + ociRef = spec.Object + } else { + ociRef = name + } + + return ociRef +} + // DigestTranslator creates a digest reference by adding the // digest to an image name func DigestTranslator(prefix string) func(digest.Digest) string { diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go index dac701bb81442..04c2d5a6055f1 100644 --- a/vendor/github.com/containerd/containerd/images/handlers.go +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -117,7 +117,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err // // If any handler returns an error, the dispatch session will be canceled. func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { - eg, ctx := errgroup.WithContext(ctx) + eg, ctx2 := errgroup.WithContext(ctx) for _, desc := range descs { desc := desc @@ -126,10 +126,11 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, return err } } + eg.Go(func() error { desc := desc - children, err := handler.Handle(ctx, desc) + children, err := handler.Handle(ctx2, desc) if limiter != nil { limiter.Release(1) } @@ -141,7 +142,7 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, } if len(children) > 0 { - return Dispatch(ctx, handler, limiter, children...) + return Dispatch(ctx2, handler, limiter, children...) 
} return nil diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index f72684d82946e..ee5778d249f34 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "sort" - "strings" "time" "github.com/containerd/containerd/content" @@ -119,7 +118,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor } size += desc.Size return nil, nil - }), FilterPlatforms(ChildrenHandler(provider), platform)), image.Target) + }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) } type platformManifest struct { @@ -142,6 +141,7 @@ type platformManifest struct { // this direction because this abstraction is not needed.` func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { var ( + limit = 1 m []platformManifest wasIndex bool ) @@ -210,10 +210,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } } + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + wasIndex = true + if len(descs) > limit { + return descs[:limit], nil + } return descs, nil - } return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) }), image); err != nil { @@ -227,17 +239,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } return ocispec.Manifest{}, err } - - sort.SliceStable(m, func(i, j int) bool { - if m[i].p == nil { - return false - } - if m[j].p == nil { - return true - } - return platform.Less(*m[i].p, *m[j].p) - }) - return *m[0].m, nil } @@ -356,15 +357,11 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr } descs = append(descs, index.Manifests...) - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, - MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, - MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, - ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, - ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip, - MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: - // childless data types. - return nil, nil default: + if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) { + // childless data types. + return nil, nil + } log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) } @@ -387,22 +384,3 @@ func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.D } return config.RootFS.DiffIDs, nil } - -// IsCompressedDiff returns true if mediaType is a known compressed diff media type. -// It returns false if the media type is a diff, but not compressed. 
If the media type -// is not a known diff type, it returns errdefs.ErrNotImplemented -func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) { - switch mediaType { - case ocispec.MediaTypeImageLayer, MediaTypeDockerSchema2Layer: - case ocispec.MediaTypeImageLayerGzip, MediaTypeDockerSchema2LayerGzip: - return true, nil - default: - // Still apply all generic media types *.tar[.+]gzip and *.tar - if strings.HasSuffix(mediaType, ".tar.gzip") || strings.HasSuffix(mediaType, ".tar+gzip") { - return true, nil - } else if !strings.HasSuffix(mediaType, ".tar") { - return false, errdefs.ErrNotImplemented - } - } - return false, nil -} diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index 186a3b6730395..2f47b0e682d8e 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -16,6 +16,15 @@ package images +import ( + "context" + "sort" + "strings" + + "github.com/containerd/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + // mediatype definitions for image components handled in containerd. // // oci components are generally referenced directly, although we may centralize @@ -40,3 +49,78 @@ const ( // Legacy Docker schema1 manifest MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" ) + +// DiffCompression returns the compression as defined by the layer diff media +// type. For Docker media types without compression, "unknown" is returned to +// indicate that the media type may be compressed. If the media type is not +// recognized as a layer diff, then it returns errdefs.ErrNotImplemented +func DiffCompression(ctx context.Context, mediaType string) (string, error) { + base, ext := parseMediaTypes(mediaType) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + // These media types may have been compressed but failed to + // use the correct media type. The decompression function + // should detect and handle this case. 
+ return "unknown", nil + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "gzip", nil + case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: + if len(ext) > 0 { + switch ext[len(ext)-1] { + case "gzip": + return "gzip", nil + } + } + return "", nil + default: + return "", errdefs.ErrNotImplemented + } +} + +// parseMediaTypes splits the media type into the base type and +// an array of sorted extensions +func parseMediaTypes(mt string) (string, []string) { + if mt == "" { + return "", []string{} + } + + s := strings.Split(mt, "+") + ext := s[1:] + sort.Strings(ext) + + return s[0], ext +} + +// IsLayerTypes returns true if the media type is a layer +func IsLayerType(mt string) bool { + if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { + return true + } + + // Parse Docker media types, strip off any + suffixes first + base, _ := parseMediaTypes(mt) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: + return true + } + return false +} + +// IsKnownConfig returns true if the media type is a known config type +func IsKnownConfig(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: + return true + } + return false +} diff --git a/vendor/github.com/containerd/containerd/images/oci/exporter.go b/vendor/github.com/containerd/containerd/images/oci/exporter.go deleted file mode 100644 index 8bb5354894c82..0000000000000 --- a/vendor/github.com/containerd/containerd/images/oci/exporter.go +++ /dev/null @@ -1,241 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package oci - -import ( - "archive/tar" - "context" - "encoding/json" - "io" - "sort" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" - ocispecs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// V1Exporter implements OCI Image Spec v1. -// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc. -// -// TODO(AkihiroSuda): add V1Exporter{TranslateMediaTypes: true} that transforms media types, -// e.g. 
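A minimal sketch of the media-type helpers introduced above, assuming upstream import paths; the descriptor is assumed to come from an image walk:

import (
    "context"
    "fmt"

    "github.com/containerd/containerd/images"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func describeLayer(ctx context.Context, desc ocispec.Descriptor) error {
    if !images.IsLayerType(desc.MediaType) {
        return fmt.Errorf("%s is not a layer media type", desc.MediaType)
    }
    // "" means an uncompressed diff, "gzip" a gzip diff, and "unknown" a
    // Docker layer type whose payload may or may not actually be compressed.
    compression, err := images.DiffCompression(ctx, desc.MediaType)
    if err != nil {
        return err
    }
    fmt.Printf("%s: compression=%q\n", desc.MediaType, compression)
    return nil
}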
application/vnd.docker.image.rootfs.diff.tar.gzip -// -> application/vnd.oci.image.layer.v1.tar+gzip -type V1Exporter struct { - AllPlatforms bool -} - -// V1ExporterOpt allows the caller to set additional options to a new V1Exporter -type V1ExporterOpt func(c *V1Exporter) error - -// DefaultV1Exporter return a default V1Exporter pointer -func DefaultV1Exporter() *V1Exporter { - return &V1Exporter{ - AllPlatforms: false, - } -} - -// ResolveV1ExportOpt return a new V1Exporter with V1ExporterOpt -func ResolveV1ExportOpt(opts ...V1ExporterOpt) (*V1Exporter, error) { - exporter := DefaultV1Exporter() - for _, o := range opts { - if err := o(exporter); err != nil { - return exporter, err - } - } - return exporter, nil -} - -// WithAllPlatforms set V1Exporter`s AllPlatforms option -func WithAllPlatforms(allPlatforms bool) V1ExporterOpt { - return func(c *V1Exporter) error { - c.AllPlatforms = allPlatforms - return nil - } -} - -// Export implements Exporter. -func (oe *V1Exporter) Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error { - tw := tar.NewWriter(writer) - defer tw.Close() - - records := []tarRecord{ - ociLayoutFile(""), - ociIndexRecord(desc), - } - - algorithms := map[string]struct{}{} - exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - records = append(records, blobRecord(store, desc)) - algorithms[desc.Digest.Algorithm().String()] = struct{}{} - return nil, nil - } - - childrenHandler := images.ChildrenHandler(store) - - if !oe.AllPlatforms { - // get local default platform to fetch image manifest - childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Any(platforms.DefaultSpec())) - } - - handlers := images.Handlers( - childrenHandler, - images.HandlerFunc(exportHandler), - ) - - // Walk sequentially since the number of fetchs is likely one and doing in - // parallel requires locking the export handler - if err := images.Walk(ctx, handlers, desc); err != nil { - return err - } - - if len(algorithms) > 0 { - records = append(records, directoryRecord("blobs/", 0755)) - for alg := range algorithms { - records = append(records, directoryRecord("blobs/"+alg+"/", 0755)) - } - } - - return writeTar(ctx, tw, records) -} - -type tarRecord struct { - Header *tar.Header - CopyTo func(context.Context, io.Writer) (int64, error) -} - -func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord { - path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex() - return tarRecord{ - Header: &tar.Header{ - Name: path, - Mode: 0444, - Size: desc.Size, - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - r, err := cs.ReaderAt(ctx, desc) - if err != nil { - return 0, errors.Wrap(err, "failed to get reader") - } - defer r.Close() - - // Verify digest - dgstr := desc.Digest.Algorithm().Digester() - - n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r)) - if err != nil { - return 0, errors.Wrap(err, "failed to copy to tar") - } - if dgstr.Digest() != desc.Digest { - return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest()) - } - return n, nil - }, - } -} - -func directoryRecord(name string, mode int64) tarRecord { - return tarRecord{ - Header: &tar.Header{ - Name: name, - Mode: mode, - Typeflag: tar.TypeDir, - }, - } -} - -func ociLayoutFile(version string) tarRecord { - if version == "" { - version = ocispec.ImageLayoutVersion - } - layout := ocispec.ImageLayout{ - Version: version, - } 
- - b, err := json.Marshal(layout) - if err != nil { - panic(err) - } - - return tarRecord{ - Header: &tar.Header{ - Name: ocispec.ImageLayoutFile, - Mode: 0444, - Size: int64(len(b)), - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - n, err := w.Write(b) - return int64(n), err - }, - } - -} - -func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord { - index := ocispec.Index{ - Versioned: ocispecs.Versioned{ - SchemaVersion: 2, - }, - Manifests: manifests, - } - - b, err := json.Marshal(index) - if err != nil { - panic(err) - } - - return tarRecord{ - Header: &tar.Header{ - Name: "index.json", - Mode: 0644, - Size: int64(len(b)), - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - n, err := w.Write(b) - return int64(n), err - }, - } -} - -func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error { - sort.Slice(records, func(i, j int) bool { - return records[i].Header.Name < records[j].Header.Name - }) - - for _, record := range records { - if err := tw.WriteHeader(record.Header); err != nil { - return err - } - if record.CopyTo != nil { - n, err := record.CopyTo(ctx, tw) - if err != nil { - return err - } - if n != record.Header.Size { - return errors.Errorf("unexpected copy size for %s", record.Header.Name) - } - } else if record.Header.Size > 0 { - return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) - } - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go index 9825f31672083..6080161f8412a 100644 --- a/vendor/github.com/containerd/containerd/import.go +++ b/vendor/github.com/containerd/containerd/import.go @@ -35,6 +35,7 @@ type importOpts struct { imageRefT func(string) string dgstRefT func(digest.Digest) string allPlatforms bool + compress bool } // ImportOpt allows the caller to specify import specific options @@ -74,9 +75,18 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt { } } +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(c *importOpts) error { + c.compress = true + return nil + } +} + // Import imports an image from a Tar stream using reader. // Caller needs to specify importer. Future version may use oci.v1 as the default. -// Note that unreferrenced blobs may be imported to the content store as well. +// Note that unreferenced blobs may be imported to the content store as well. func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt) ([]images.Image, error) { var iopts importOpts for _, o := range opts { @@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } defer done(ctx) - index, err := archive.ImportIndex(ctx, c.ContentStore(), reader) + var aio []archive.ImportOpt + if iopts.compress { + aio = append(aio, archive.WithImportCompression()) + } + + index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...) 
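A minimal sketch of importing an archive with the new client-level compression option, assuming a connected *containerd.Client, the "default" namespace, and upstream import paths; the tar path is a placeholder:

import (
    "context"
    "os"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/namespaces"
)

func importArchive(ctx context.Context, client *containerd.Client, tarPath string) ([]images.Image, error) {
    ctx = namespaces.WithNamespace(ctx, "default")

    f, err := os.Open(tarPath)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    // WithImportCompression gzips uncompressed layers while they are
    // ingested, useful for Docker-format archives that ship plain tars.
    return client.Import(ctx, f, containerd.WithImportCompression())
}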
if err != nil { return nil, err } @@ -110,7 +125,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } var platformMatcher = platforms.All if !iopts.allPlatforms { - platformMatcher = platforms.Default() + platformMatcher = c.platform } var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { @@ -130,16 +145,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } for _, m := range idx.Manifests { - if ref := m.Annotations[ocispec.AnnotationRefName]; ref != "" { - if iopts.imageRefT != nil { - ref = iopts.imageRefT(ref) - } - if ref != "" { - imgs = append(imgs, images.Image{ - Name: ref, - Target: m, - }) - } + name := imageName(m.Annotations, iopts.imageRefT) + if name != "" { + imgs = append(imgs, images.Image{ + Name: name, + Target: m, + }) } if iopts.dgstRefT != nil { ref := iopts.dgstRefT(m.Digest) @@ -178,3 +189,17 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt return imgs, nil } + +func imageName(annotations map[string]string, ociCleanup func(string) string) string { + name := annotations[images.AnnotationImageName] + if name != "" { + return name + } + name = annotations[ocispec.AnnotationRefName] + if name != "" { + if ociCleanup != nil { + name = ociCleanup(name) + } + } + return name +} diff --git a/vendor/github.com/containerd/containerd/install.go b/vendor/github.com/containerd/containerd/install.go index 4545d4554e810..5b8b735ded51b 100644 --- a/vendor/github.com/containerd/containerd/install.go +++ b/vendor/github.com/containerd/containerd/install.go @@ -27,7 +27,6 @@ import ( "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" "github.com/pkg/errors" ) @@ -43,7 +42,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) } var ( cs = image.ContentStore() - platform = platforms.Default() + platform = c.platform ) manifest, err := images.Manifest(ctx, cs, image.Target(), platform) if err != nil { diff --git a/vendor/github.com/containerd/containerd/leases/lease.go b/vendor/github.com/containerd/containerd/leases/lease.go index 909b4ea0bb184..058d065594f35 100644 --- a/vendor/github.com/containerd/containerd/leases/lease.go +++ b/vendor/github.com/containerd/containerd/leases/lease.go @@ -32,6 +32,9 @@ type Manager interface { Create(context.Context, ...Opt) (Lease, error) Delete(context.Context, Lease, ...DeleteOpt) error List(context.Context, ...string) ([]Lease, error) + AddResource(context.Context, Lease, Resource) error + DeleteResource(context.Context, Lease, Resource) error + ListResources(context.Context, Lease) ([]Resource, error) } // Lease retains resources to prevent cleanup before @@ -42,6 +45,13 @@ type Lease struct { Labels map[string]string } +// Resource represents low level resource of image, like content, ingest and +// snapshotter. 
+type Resource struct { + ID string + Type string +} + // DeleteOptions provide options on image delete type DeleteOptions struct { Synchronous bool diff --git a/vendor/github.com/containerd/containerd/leases/proxy/manager.go b/vendor/github.com/containerd/containerd/leases/proxy/manager.go index 30afe5368e7ae..96cd5e653ba47 100644 --- a/vendor/github.com/containerd/containerd/leases/proxy/manager.go +++ b/vendor/github.com/containerd/containerd/leases/proxy/manager.go @@ -91,3 +91,43 @@ func (pm *proxyManager) List(ctx context.Context, filters ...string) ([]leases.L return l, nil } + +func (pm *proxyManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + _, err := pm.client.AddResource(ctx, &leasesapi.AddResourceRequest{ + ID: lease.ID, + Resource: leasesapi.Resource{ + ID: r.ID, + Type: r.Type, + }, + }) + return errdefs.FromGRPC(err) +} + +func (pm *proxyManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + _, err := pm.client.DeleteResource(ctx, &leasesapi.DeleteResourceRequest{ + ID: lease.ID, + Resource: leasesapi.Resource{ + ID: r.ID, + Type: r.Type, + }, + }) + return errdefs.FromGRPC(err) +} + +func (pm *proxyManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + resp, err := pm.client.ListResources(ctx, &leasesapi.ListResourcesRequest{ + ID: lease.ID, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + rs := make([]leases.Resource, 0, len(resp.Resources)) + for _, i := range resp.Resources { + rs = append(rs, leases.Resource{ + ID: i.ID, + Type: i.Type, + }) + } + return rs, nil +} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 3fab96b858628..31f1a3ac09b1e 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -30,7 +30,7 @@ var ( // messages. G = GetLogger - // L is an alias for the the standard logger. + // L is an alias for the standard logger. 
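A minimal sketch of the new lease resource methods, assuming a connected *containerd.Client; the WithRandomID and WithExpiration lease options and the "content" resource type are assumptions here, not shown in this diff:

import (
    "context"
    "time"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/leases"
    digest "github.com/opencontainers/go-digest"
)

func pinBlob(ctx context.Context, client *containerd.Client, dgst digest.Digest) error {
    lm := client.LeasesService()

    l, err := lm.Create(ctx, leases.WithRandomID(), leases.WithExpiration(time.Hour))
    if err != nil {
        return err
    }

    // Attach the blob to the lease so the garbage collector keeps it for as
    // long as the lease lives; "content" is assumed to be an accepted
    // resource type.
    return lm.AddResource(ctx, l, leases.Resource{
        ID:   dgst.String(),
        Type: "content",
    })
}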
L = logrus.NewEntry(logrus.StandardLogger()) ) diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go index af8224786df84..09b0d203d2948 100644 --- a/vendor/github.com/containerd/containerd/metadata/containers.go +++ b/vendor/github.com/containerd/containerd/metadata/containers.go @@ -19,6 +19,7 @@ package metadata import ( "context" "strings" + "sync/atomic" "time" "github.com/containerd/containerd/containers" @@ -35,13 +36,13 @@ import ( ) type containerStore struct { - tx *bolt.Tx + db *DB } // NewContainerStore returns a Store backed by an underlying bolt DB -func NewContainerStore(tx *bolt.Tx) containers.Store { +func NewContainerStore(db *DB) containers.Store { return &containerStore{ - tx: tx, + db: db, } } @@ -51,14 +52,21 @@ func (s *containerStore) Get(ctx context.Context, id string) (containers.Contain return containers.Container{}, err } - bkt := getContainerBucket(s.tx, namespace, id) - if bkt == nil { - return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace) - } - container := containers.Container{ID: id} - if err := readContainer(&container, bkt); err != nil { - return containers.Container{}, errors.Wrapf(err, "failed to read container %q", id) + + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainerBucket(tx, namespace, id) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace) + } + + if err := readContainer(&container, bkt); err != nil { + return errors.Wrapf(err, "failed to read container %q", id) + } + + return nil + }); err != nil { + return containers.Container{}, err } return container, nil @@ -75,27 +83,30 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) } - bkt := getContainersBucket(s.tx, namespace) - if bkt == nil { - return nil, nil // empty store - } - var m []containers.Container - if err := bkt.ForEach(func(k, v []byte) error { - cbkt := bkt.Bucket(k) - if cbkt == nil { - return nil - } - container := containers.Container{ID: string(k)} - if err := readContainer(&container, cbkt); err != nil { - return errors.Wrapf(err, "failed to read container %q", string(k)) + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainersBucket(tx, namespace) + if bkt == nil { + return nil // empty store } - if filter.Match(adaptContainer(container)) { - m = append(m, container) - } - return nil + return bkt.ForEach(func(k, v []byte) error { + cbkt := bkt.Bucket(k) + if cbkt == nil { + return nil + } + container := containers.Container{ID: string(k)} + + if err := readContainer(&container, cbkt); err != nil { + return errors.Wrapf(err, "failed to read container %q", string(k)) + } + + if filter.Match(adaptContainer(container)) { + m = append(m, container) + } + return nil + }) }); err != nil { return nil, err } @@ -113,23 +124,29 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai return containers.Container{}, errors.Wrap(err, "create container failed validation") } - bkt, err := createContainersBucket(s.tx, namespace) - if err != nil { - return containers.Container{}, err - } + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt, err := createContainersBucket(tx, namespace) + if err != nil { + return err + } - cbkt, err := bkt.CreateBucket([]byte(container.ID)) - if err != nil { - if err == 
bolt.ErrBucketExists { - err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID) + cbkt, err := bkt.CreateBucket([]byte(container.ID)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID) + } + return err } - return containers.Container{}, err - } - container.CreatedAt = time.Now().UTC() - container.UpdatedAt = container.CreatedAt - if err := writeContainer(cbkt, &container); err != nil { - return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID) + container.CreatedAt = time.Now().UTC() + container.UpdatedAt = container.CreatedAt + if err := writeContainer(cbkt, &container); err != nil { + return errors.Wrapf(err, "failed to write container %q", container.ID) + } + + return nil + }); err != nil { + return containers.Container{}, err } return container, nil @@ -145,85 +162,91 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "must specify a container id") } - bkt := getContainersBucket(s.tx, namespace) - if bkt == nil { - return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace) - } - - cbkt := bkt.Bucket([]byte(container.ID)) - if cbkt == nil { - return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID) - } - var updated containers.Container - if err := readContainer(&updated, cbkt); err != nil { - return updated, errors.Wrapf(err, "failed to read container %q", container.ID) - } - createdat := updated.CreatedAt - updated.ID = container.ID - - if len(fieldpaths) == 0 { - // only allow updates to these field on full replace. - fieldpaths = []string{"labels", "spec", "extensions", "image", "snapshotkey"} - - // Fields that are immutable must cause an error when no field paths - // are provided. This allows these fields to become mutable in the - // future. - if updated.Snapshotter != container.Snapshotter { - return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable") + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainersBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace) } - if updated.Runtime.Name != container.Runtime.Name { - return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable") + cbkt := bkt.Bucket([]byte(container.ID)) + if cbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID) } - } - // apply the field mask. If you update this code, you better follow the - // field mask rules in field_mask.proto. If you don't know what this - // is, do not update this code. - for _, path := range fieldpaths { - if strings.HasPrefix(path, "labels.") { - if updated.Labels == nil { - updated.Labels = map[string]string{} + if err := readContainer(&updated, cbkt); err != nil { + return errors.Wrapf(err, "failed to read container %q", container.ID) + } + createdat := updated.CreatedAt + updated.ID = container.ID + + if len(fieldpaths) == 0 { + // only allow updates to these field on full replace. + fieldpaths = []string{"labels", "spec", "extensions", "image", "snapshotkey"} + + // Fields that are immutable must cause an error when no field paths + // are provided. 
This allows these fields to become mutable in the + // future. + if updated.Snapshotter != container.Snapshotter { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable") + } + + if updated.Runtime.Name != container.Runtime.Name { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable") } - key := strings.TrimPrefix(path, "labels.") - updated.Labels[key] = container.Labels[key] - continue } - if strings.HasPrefix(path, "extensions.") { - if updated.Extensions == nil { - updated.Extensions = map[string]types.Any{} + // apply the field mask. If you update this code, you better follow the + // field mask rules in field_mask.proto. If you don't know what this + // is, do not update this code. + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = container.Labels[key] + continue + } + + if strings.HasPrefix(path, "extensions.") { + if updated.Extensions == nil { + updated.Extensions = map[string]types.Any{} + } + key := strings.TrimPrefix(path, "extensions.") + updated.Extensions[key] = container.Extensions[key] + continue + } + + switch path { + case "labels": + updated.Labels = container.Labels + case "spec": + updated.Spec = container.Spec + case "extensions": + updated.Extensions = container.Extensions + case "image": + updated.Image = container.Image + case "snapshotkey": + updated.SnapshotKey = container.SnapshotKey + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID) } - key := strings.TrimPrefix(path, "extensions.") - updated.Extensions[key] = container.Extensions[key] - continue } - switch path { - case "labels": - updated.Labels = container.Labels - case "spec": - updated.Spec = container.Spec - case "extensions": - updated.Extensions = container.Extensions - case "image": - updated.Image = container.Image - case "snapshotkey": - updated.SnapshotKey = container.SnapshotKey - default: - return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID) + if err := validateContainer(&updated); err != nil { + return errors.Wrap(err, "update failed validation") } - } - if err := validateContainer(&updated); err != nil { - return containers.Container{}, errors.Wrap(err, "update failed validation") - } + updated.CreatedAt = createdat + updated.UpdatedAt = time.Now().UTC() + if err := writeContainer(cbkt, &updated); err != nil { + return errors.Wrapf(err, "failed to write container %q", container.ID) + } - updated.CreatedAt = createdat - updated.UpdatedAt = time.Now().UTC() - if err := writeContainer(cbkt, &updated); err != nil { - return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID) + return nil + }); err != nil { + return containers.Container{}, err } return updated, nil @@ -235,15 +258,23 @@ func (s *containerStore) Delete(ctx context.Context, id string) error { return err } - bkt := getContainersBucket(s.tx, namespace) - if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace) - } + return update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainersBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace) + } - if err := 
bkt.DeleteBucket([]byte(id)); err == bolt.ErrBucketNotFound { - return errors.Wrapf(errdefs.ErrNotFound, "container %v", id) - } - return err + if err := bkt.DeleteBucket([]byte(id)); err != nil { + if err == bolt.ErrBucketNotFound { + err = errors.Wrapf(errdefs.ErrNotFound, "container %v", id) + } + return err + } + + atomic.AddUint32(&s.db.dirty, 1) + + return nil + }) } func validateContainer(container *containers.Container) error { diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index 00f310f7b4502..268a9b1b79cf4 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "strings" "sync" + "sync/atomic" "time" "github.com/containerd/containerd/content" @@ -221,9 +222,8 @@ func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error { } // Mark content store as dirty for triggering garbage collection - cs.db.dirtyL.Lock() + atomic.AddUint32(&cs.db.dirty, 1) cs.db.dirtyCS = true - cs.db.dirtyL.Unlock() return nil }) @@ -567,6 +567,8 @@ func (nw *namespacedWriter) createAndCopy(ctx context.Context, desc ocispec.Desc } func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + ctx = namespaces.WithNamespace(ctx, nw.namespace) + nw.l.RLock() defer nw.l.RUnlock() @@ -635,11 +637,11 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size) } size = status.Offset - actual = nw.w.Digest() if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) { return "", err } + actual = nw.w.Digest() } bkt, err := createBlobBucket(tx, nw.namespace, actual) diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go index 7f1b27b383e08..40d045f05d3e5 100644 --- a/vendor/github.com/containerd/containerd/metadata/db.go +++ b/vendor/github.com/containerd/containerd/metadata/db.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "strings" "sync" + "sync/atomic" "time" "github.com/containerd/containerd/content" @@ -75,10 +76,16 @@ type DB struct { // sweep phases without preventing read transactions. wlock sync.RWMutex - // dirty flags and lock keeps track of datastores which have had deletions - // since the last garbage collection. These datastores will will be garbage - // collected during the next garbage collection. - dirtyL sync.Mutex + // dirty flag indicates that references have been removed which require + // a garbage collection to ensure the database is clean. This tracks + // the number of dirty operations. This should be updated and read + // atomically if outside of wlock.Lock. + dirty uint32 + + // dirtySS and dirtyCS flags keeps track of datastores which have had + // deletions since the last garbage collection. These datastores will + // be garbage collected during the next garbage collection. These + // should only be updated inside of a write transaction or wlock.Lock. 
dirtySS map[string]struct{} dirtyCS bool @@ -162,7 +169,7 @@ func (m *DB) Init(ctx context.Context) error { } } - // Previous version fo database found + // Previous version of database found if schema != "v0" { updates := migrations[i:] @@ -237,12 +244,10 @@ func (m *DB) Update(fn func(*bolt.Tx) error) error { defer m.wlock.RUnlock() err := m.db.Update(fn) if err == nil { - m.dirtyL.Lock() - dirty := m.dirtyCS || len(m.dirtySS) > 0 + dirty := atomic.LoadUint32(&m.dirty) > 0 for _, fn := range m.mutationCallbacks { fn(dirty) } - m.dirtyL.Unlock() } return err @@ -254,9 +259,9 @@ func (m *DB) Update(fn func(*bolt.Tx) error) error { // The callback function is an argument for whether a deletion has occurred // since the last garbage collection. func (m *DB) RegisterMutationCallback(fn func(bool)) { - m.dirtyL.Lock() + m.wlock.Lock() m.mutationCallbacks = append(m.mutationCallbacks, fn) - m.dirtyL.Unlock() + m.wlock.Unlock() } // GCStats holds the duration for the different phases of the garbage collector @@ -282,8 +287,6 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { return nil, err } - m.dirtyL.Lock() - if err := m.db.Update(func(tx *bolt.Tx) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -309,7 +312,6 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { return nil }); err != nil { - m.dirtyL.Unlock() m.wlock.Unlock() return nil, err } @@ -317,6 +319,9 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { var stats GCStats var wg sync.WaitGroup + // reset dirty, no need for atomic inside of wlock.Lock + m.dirty = 0 + if len(m.dirtySS) > 0 { var sl sync.Mutex stats.SnapshotD = map[string]time.Duration{} @@ -349,8 +354,6 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { m.dirtyCS = false } - m.dirtyL.Unlock() - stats.MetaD = time.Since(t1) m.wlock.Unlock() diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go index 6afaa17729181..afe16c9222d77 100644 --- a/vendor/github.com/containerd/containerd/metadata/gc.go +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -46,11 +46,17 @@ const ( ResourceIngest ) +const ( + resourceContentFlat = ResourceContent | 0x20 + resourceSnapshotFlat = ResourceSnapshot | 0x20 +) + var ( labelGCRoot = []byte("containerd.io/gc.root") labelGCSnapRef = []byte("containerd.io/gc.ref.snapshot.") labelGCContentRef = []byte("containerd.io/gc.ref.content") labelGCExpire = []byte("containerd.io/gc.expire") + labelGCFlat = []byte("containerd.io/gc.flat") ) func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { @@ -90,6 +96,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return nil } libkt := lbkt.Bucket(k) + var flat bool if lblbkt := libkt.Bucket(bucketKeyObjectLabels); lblbkt != nil { if expV := lblbkt.Get(labelGCExpire); expV != nil { @@ -102,6 +109,10 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return nil } } + + if flatV := lblbkt.Get(labelGCFlat); flatV != nil { + flat = true + } } fn(gcnode(ResourceLease, ns, string(k))) @@ -111,16 +122,26 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { // no need to allow the lookup to be recursive, handling here // therefore reduces the number of database seeks. 
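// Illustrative sketch, not part of the vendored patch: the new
// "containerd.io/gc.flat" label handled here makes the collector treat a
// lease's content and snapshot references as flat, without following their
// own references. A caller could opt in when creating a lease; only the
// presence of the label matters, its value is ignored. The lease ID is a
// placeholder and lm is any leases.Manager.
//
//	withFlatGC := func(l *leases.Lease) error {
//		if l.Labels == nil {
//			l.Labels = map[string]string{}
//		}
//		l.Labels["containerd.io/gc.flat"] = "true"
//		return nil
//	}
//	lease, err := lm.Create(ctx, leases.WithID("flat-import-lease"), withFlatGC)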
+ ctype := ResourceContent + if flat { + ctype = resourceContentFlat + } + cbkt := libkt.Bucket(bucketKeyObjectContent) if cbkt != nil { if err := cbkt.ForEach(func(k, v []byte) error { - fn(gcnode(ResourceContent, ns, string(k))) + fn(gcnode(ctype, ns, string(k))) return nil }); err != nil { return err } } + stype := ResourceSnapshot + if flat { + stype = resourceSnapshotFlat + } + sbkt := libkt.Bucket(bucketKeyObjectSnapshots) if sbkt != nil { if err := sbkt.ForEach(func(sk, sv []byte) error { @@ -130,7 +151,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { snbkt := sbkt.Bucket(sk) return snbkt.ForEach(func(k, v []byte) error { - fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k))) + fn(gcnode(stype, ns, fmt.Sprintf("%s/%s", sk, k))) return nil }) }); err != nil { @@ -257,7 +278,8 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { } func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { - if node.Type == ResourceContent { + switch node.Type { + case ResourceContent: bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key)) if bkt == nil { // Node may be created from dead edge @@ -265,7 +287,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } return sendLabelRefs(node.Namespace, bkt, fn) - } else if node.Type == ResourceSnapshot { + case ResourceSnapshot, resourceSnapshotFlat: parts := strings.SplitN(node.Key, "/", 2) if len(parts) != 2 { return errors.Errorf("invalid snapshot gc key %s", node.Key) @@ -280,11 +302,16 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } if pv := bkt.Get(bucketKeyParent); len(pv) > 0 { - fn(gcnode(ResourceSnapshot, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + fn(gcnode(node.Type, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + } + + // Do not send labeled references for flat snapshot refs + if node.Type == resourceSnapshotFlat { + return nil } return sendLabelRefs(node.Namespace, bkt, fn) - } else if node.Type == ResourceIngest { + case ResourceIngest: // Send expected value bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key)) if bkt == nil { diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go index 1dda753dbee1d..cace4e1801279 100644 --- a/vendor/github.com/containerd/containerd/metadata/images.go +++ b/vendor/github.com/containerd/containerd/metadata/images.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "fmt" "strings" + "sync/atomic" "time" "github.com/containerd/containerd/errdefs" @@ -249,19 +250,16 @@ func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.Del return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) } - err = bkt.DeleteBucket([]byte(name)) - if err == bolt.ErrBucketNotFound { - return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + if err = bkt.DeleteBucket([]byte(name)); err != nil { + if err == bolt.ErrBucketNotFound { + err = errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + return err } - // A reference to a piece of content has been removed, - // mark content store as dirty for triggering garbage - // collection - s.db.dirtyL.Lock() - s.db.dirtyCS = true - s.db.dirtyL.Unlock() + atomic.AddUint32(&s.db.dirty, 1) - return err + return nil }) } diff --git 
a/vendor/github.com/containerd/containerd/metadata/leases.go b/vendor/github.com/containerd/containerd/metadata/leases.go index a3c1701d8addf..60da06b0f00e0 100644 --- a/vendor/github.com/containerd/containerd/metadata/leases.go +++ b/vendor/github.com/containerd/containerd/metadata/leases.go @@ -18,6 +18,9 @@ package metadata import ( "context" + "fmt" + "strings" + "sync/atomic" "time" "github.com/containerd/containerd/errdefs" @@ -30,17 +33,17 @@ import ( bolt "go.etcd.io/bbolt" ) -// LeaseManager manages the create/delete lifecyle of leases +// LeaseManager manages the create/delete lifecycle of leases // and also returns existing leases type LeaseManager struct { - tx *bolt.Tx + db *DB } // NewLeaseManager creates a new lease manager for managing leases using // the provided database transaction. -func NewLeaseManager(tx *bolt.Tx) *LeaseManager { +func NewLeaseManager(db *DB) *LeaseManager { return &LeaseManager{ - tx: tx, + db: db, } } @@ -61,56 +64,66 @@ func (lm *LeaseManager) Create(ctx context.Context, opts ...leases.Opt) (leases. return leases.Lease{}, err } - topbkt, err := createBucketIfNotExists(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) - if err != nil { - return leases.Lease{}, err - } + if err := update(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) + if err != nil { + return err + } - txbkt, err := topbkt.CreateBucket([]byte(l.ID)) - if err != nil { - if err == bolt.ErrBucketExists { - err = errdefs.ErrAlreadyExists + txbkt, err := topbkt.CreateBucket([]byte(l.ID)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errdefs.ErrAlreadyExists + } + return errors.Wrapf(err, "lease %q", l.ID) } - return leases.Lease{}, errors.Wrapf(err, "lease %q", l.ID) - } - t := time.Now().UTC() - createdAt, err := t.MarshalBinary() - if err != nil { - return leases.Lease{}, err - } - if err := txbkt.Put(bucketKeyCreatedAt, createdAt); err != nil { - return leases.Lease{}, err - } + t := time.Now().UTC() + createdAt, err := t.MarshalBinary() + if err != nil { + return err + } + if err := txbkt.Put(bucketKeyCreatedAt, createdAt); err != nil { + return err + } - if l.Labels != nil { - if err := boltutil.WriteLabels(txbkt, l.Labels); err != nil { - return leases.Lease{}, err + if l.Labels != nil { + if err := boltutil.WriteLabels(txbkt, l.Labels); err != nil { + return err + } } - } - l.CreatedAt = t + l.CreatedAt = t + return nil + }); err != nil { + return leases.Lease{}, err + } return l, nil } -// Delete delets the lease with the provided lease ID +// Delete deletes the lease with the provided lease ID func (lm *LeaseManager) Delete(ctx context.Context, lease leases.Lease, _ ...leases.DeleteOpt) error { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return err } - topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) - if topbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) - } - if err := topbkt.DeleteBucket([]byte(lease.ID)); err != nil { - if err == bolt.ErrBucketNotFound { - err = errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + return update(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) } - return err - } - return nil + if err := topbkt.DeleteBucket([]byte(lease.ID)); err != nil { + if err == 
bolt.ErrBucketNotFound { + err = errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + return err + } + + atomic.AddUint32(&lm.db.dirty, 1) + + return nil + }) } // List lists all active leases @@ -127,44 +140,184 @@ func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, var ll []leases.Lease - topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) - if topbkt == nil { - return ll, nil - } + if err := view(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) + if topbkt == nil { + return nil + } + + return topbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + txbkt := topbkt.Bucket(k) + + l := leases.Lease{ + ID: string(k), + } + + if v := txbkt.Get(bucketKeyCreatedAt); v != nil { + t := &l.CreatedAt + if err := t.UnmarshalBinary(v); err != nil { + return err + } + } + + labels, err := boltutil.ReadLabels(txbkt) + if err != nil { + return err + } + l.Labels = labels + + if filter.Match(adaptLease(l)) { + ll = append(ll, l) + } - if err := topbkt.ForEach(func(k, v []byte) error { - if v != nil { return nil + }) + }); err != nil { + return nil, err + } + + return ll, nil +} + +// AddResource references the resource by the provided lease. +func (lm *LeaseManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) } - txbkt := topbkt.Bucket(k) - l := leases.Lease{ - ID: string(k), + keys, ref, err := parseLeaseResource(r) + if err != nil { + return err } - if v := txbkt.Get(bucketKeyCreatedAt); v != nil { - t := &l.CreatedAt - if err := t.UnmarshalBinary(v); err != nil { + bkt := topbkt + for _, key := range keys { + bkt, err = bkt.CreateBucketIfNotExists([]byte(key)) + if err != nil { return err } } + return bkt.Put([]byte(ref), nil) + }) +} - labels, err := boltutil.ReadLabels(txbkt) +// DeleteResource dereferences the resource by the provided lease. +func (lm *LeaseManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + keys, ref, err := parseLeaseResource(r) if err != nil { return err } - l.Labels = labels - if filter.Match(adaptLease(l)) { - ll = append(ll, l) + bkt := topbkt + for _, key := range keys { + if bkt == nil { + break + } + bkt = bkt.Bucket([]byte(key)) + } + + if bkt != nil { + if err := bkt.Delete([]byte(ref)); err != nil { + return err + } } + atomic.AddUint32(&lm.db.dirty, 1) + return nil - }); err != nil { + }) +} + +// ListResources lists all the resources referenced by the lease. 
+func (lm *LeaseManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { return nil, err } - return ll, nil + var rs []leases.Resource + + if err := view(ctx, lm.db, func(tx *bolt.Tx) error { + + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + // content resources + if cbkt := topbkt.Bucket(bucketKeyObjectContent); cbkt != nil { + if err := cbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: string(bucketKeyObjectContent), + }) + + return nil + }); err != nil { + return err + } + } + + // ingest resources + if lbkt := topbkt.Bucket(bucketKeyObjectIngests); lbkt != nil { + if err := lbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: string(bucketKeyObjectIngests), + }) + + return nil + }); err != nil { + return err + } + } + + // snapshot resources + if sbkt := topbkt.Bucket(bucketKeyObjectSnapshots); sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + + snbkt := sbkt.Bucket(sk) + return snbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: fmt.Sprintf("%s/%s", bucketKeyObjectSnapshots, sk), + }) + return nil + }) + }); err != nil { + return err + } + } + + return nil + }); err != nil { + return nil, err + } + return rs, nil } func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error { @@ -307,3 +460,36 @@ func removeIngestLease(ctx context.Context, tx *bolt.Tx, ref string) error { return bkt.Delete([]byte(ref)) } + +func parseLeaseResource(r leases.Resource) ([]string, string, error) { + var ( + ref = r.ID + typ = r.Type + keys = strings.Split(typ, "/") + ) + + switch k := keys[0]; k { + case string(bucketKeyObjectContent), + string(bucketKeyObjectIngests): + + if len(keys) != 1 { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid resource type %s", typ) + } + + if k == string(bucketKeyObjectContent) { + dgst, err := digest.Parse(ref) + if err != nil { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid content resource id %s: %v", ref, err) + } + ref = dgst.String() + } + case string(bucketKeyObjectSnapshots): + if len(keys) != 2 { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid snapshot resource type %s", typ) + } + default: + return nil, "", errors.Wrapf(errdefs.ErrNotImplemented, "resource type %s not supported yet", typ) + } + + return keys, ref, nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/namespaces.go b/vendor/github.com/containerd/containerd/metadata/namespaces.go index 74951eb5c5b62..25d0e1578b2f0 100644 --- a/vendor/github.com/containerd/containerd/metadata/namespaces.go +++ b/vendor/github.com/containerd/containerd/metadata/namespaces.go @@ -129,7 +129,15 @@ func (s *namespaceStore) List(ctx context.Context) ([]string, error) { return namespaces, nil } -func (s *namespaceStore) Delete(ctx context.Context, namespace string) error { +func (s *namespaceStore) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error { + i := &namespaces.DeleteInfo{ + Name: namespace, + } + for _, o := range opts { + if err := o(ctx, i); err != nil { + return err + } + } bkt := getBucket(s.tx, bucketKeyVersion) if 
empty, err := s.namespaceEmpty(ctx, namespace); err != nil { return err diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go index 54209171989de..4c38b41d7709d 100644 --- a/vendor/github.com/containerd/containerd/metadata/snapshot.go +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -21,6 +21,7 @@ import ( "fmt" "strings" "sync" + "sync/atomic" "time" "github.com/containerd/containerd/errdefs" @@ -34,6 +35,10 @@ import ( bolt "go.etcd.io/bbolt" ) +const ( + inheritedLabelsPrefix = "containerd.io/snapshot/" +) + type snapshotter struct { snapshots.Snapshotter name string @@ -209,6 +214,15 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath bkey = string(sbkt.Get(bucketKeyName)) local.Parent = string(sbkt.Get(bucketKeyParent)) + inner := snapshots.Info{ + Name: bkey, + Labels: filterInheritedLabels(local.Labels), + } + + if _, err := s.Snapshotter.Update(ctx, inner, fieldpaths...); err != nil { + return err + } + return nil }); err != nil { return snapshots.Info{}, err @@ -338,12 +352,14 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return err } + inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels)) + // TODO: Consider doing this outside of transaction to lessen // metadata lock time if readonly { - m, err = s.Snapshotter.View(ctx, bkey, bparent) + m, err = s.Snapshotter.View(ctx, bkey, bparent, inheritedOpt) } else { - m, err = s.Snapshotter.Prepare(ctx, bkey, bparent) + m, err = s.Snapshotter.Prepare(ctx, bkey, bparent, inheritedOpt) } return err }); err != nil { @@ -445,9 +461,11 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap return err } + inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels)) + // TODO: Consider doing this outside of transaction to lessen // metadata lock time - return s.Snapshotter.Commit(ctx, nameKey, bkey) + return s.Snapshotter.Commit(ctx, nameKey, bkey, inheritedOpt) }) } @@ -500,9 +518,8 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { } // Mark snapshotter as dirty for triggering garbage collection - s.db.dirtyL.Lock() + atomic.AddUint32(&s.db.dirty, 1) s.db.dirtySS[s.name] = struct{}{} - s.db.dirtyL.Unlock() return nil }) @@ -761,3 +778,19 @@ func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error { func (s *snapshotter) Close() error { return s.Snapshotter.Close() } + +// filterInheritedLabels filters the provided labels by removing any key which doesn't have +// a prefix of "containerd.io/snapshot/". 
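// Illustrative sketch, not part of the vendored patch: with the filtering
// below, only labels under the "containerd.io/snapshot/" prefix reach the
// wrapped snapshotter when the metadata layer prepares, views, commits or
// updates a snapshot; everything else stays in the metadata store. The label
// keys and snapshot keys are placeholders; sn is any snapshots.Snapshotter
// backed by this metadata store.
//
//	opt := snapshots.WithLabels(map[string]string{
//		"containerd.io/snapshot/mylabel": "forwarded to the underlying snapshotter",
//		"containerd.io/gc.root":          "kept only in the metadata store",
//	})
//	mounts, err := sn.Prepare(ctx, "active-key", "parent-key", opt)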
+func filterInheritedLabels(labels map[string]string) map[string]string { + if labels == nil { + return nil + } + + filtered := make(map[string]string) + for k, v := range labels { + if strings.HasPrefix(k, inheritedLabelsPrefix) { + filtered[k] = v + } + } + return filtered +} diff --git a/vendor/github.com/containerd/containerd/namespaces.go b/vendor/github.com/containerd/containerd/namespaces.go index eea70ca33a3e1..4c66406b084ea 100644 --- a/vendor/github.com/containerd/containerd/namespaces.go +++ b/vendor/github.com/containerd/containerd/namespaces.go @@ -100,10 +100,18 @@ func (r *remoteNamespaces) List(ctx context.Context) ([]string, error) { return namespaces, nil } -func (r *remoteNamespaces) Delete(ctx context.Context, namespace string) error { - var req api.DeleteNamespaceRequest - - req.Name = namespace +func (r *remoteNamespaces) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error { + i := namespaces.DeleteInfo{ + Name: namespace, + } + for _, o := range opts { + if err := o(ctx, &i); err != nil { + return err + } + } + req := api.DeleteNamespaceRequest{ + Name: namespace, + } _, err := r.client.Delete(ctx, &req) if err != nil { return errdefs.FromGRPC(err) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index cc5621a68fb15..20596f09dd09f 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -36,10 +36,9 @@ type namespaceKey struct{} // WithNamespace sets a given namespace on the context func WithNamespace(ctx context.Context, namespace string) context.Context { ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace - - // also store on the grpc headers so it gets picked up by any clients that + // also store on the grpc and ttrpc headers so it gets picked up by any clients that // are using this. - return withGRPCNamespaceHeader(ctx, namespace) + return withTTRPCNamespaceHeader(withGRPCNamespaceHeader(ctx, namespace), namespace) } // NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or @@ -58,22 +57,21 @@ func NamespaceFromEnv(ctx context.Context) context.Context { func Namespace(ctx context.Context) (string, bool) { namespace, ok := ctx.Value(namespaceKey{}).(string) if !ok { - return fromGRPCHeader(ctx) + if namespace, ok = fromGRPCHeader(ctx); !ok { + return fromTTRPCHeader(ctx) + } } - return namespace, ok } -// NamespaceRequired returns the valid namepace from the context or an error. +// NamespaceRequired returns the valid namespace from the context or an error. func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") } - if err := Validate(namespace); err != nil { return "", errors.Wrap(err, "namespace validation") } - return namespace, nil } diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go index 0b5c985691977..5936772cb4cb2 100644 --- a/vendor/github.com/containerd/containerd/namespaces/store.go +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -33,5 +33,14 @@ type Store interface { List(ctx context.Context) ([]string, error) // Delete removes the namespace. The namespace must be empty to be deleted. 
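// Illustrative sketch, not part of the vendored patch: Delete now accepts
// variadic DeleteOpts (defined below), each invoked with a DeleteInfo before
// the namespace record is removed, so callers can hook into deletion. The
// hook and namespace name are placeholders; store is any namespaces.Store.
//
//	logDelete := func(ctx context.Context, i *namespaces.DeleteInfo) error {
//		log.G(ctx).Infof("removing namespace %q", i.Name)
//		return nil
//	}
//	err := store.Delete(ctx, "my-namespace", logDelete)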
- Delete(ctx context.Context, namespace string) error + Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error } + +// DeleteInfo specifies information for the deletion of a namespace +type DeleteInfo struct { + // Name of the namespace + Name string +} + +// DeleteOpts allows the caller to set options for namespace deletion +type DeleteOpts func(context.Context, *DeleteInfo) error diff --git a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go new file mode 100644 index 0000000000000..bcd2643cf5ea2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go @@ -0,0 +1,51 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "context" + + "github.com/containerd/ttrpc" +) + +const ( + // TTRPCHeader defines the header name for specifying a containerd namespace + TTRPCHeader = "containerd-namespace-ttrpc" +) + +func copyMetadata(src ttrpc.MD) ttrpc.MD { + md := ttrpc.MD{} + for k, v := range src { + md[k] = append(md[k], v...) + } + return md +} + +func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { + md, ok := ttrpc.GetMetadata(ctx) + if !ok { + md = ttrpc.MD{} + } else { + md = copyMetadata(md) + } + md.Set(TTRPCHeader, namespace) + return ttrpc.WithMetadata(ctx, md) +} + +func fromTTRPCHeader(ctx context.Context) (string, bool) { + return ttrpc.GetMetadataValue(ctx, TTRPCHeader) +} diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index 3e7b5492a0cee..035bb7e7d83d0 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -78,7 +78,7 @@ func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s return err } -// ApplyOpts applys the options to the given spec, injecting data from the +// ApplyOpts applies the options to the given spec, injecting data from the // context, client and container instance. 
func ApplyOpts(ctx context.Context, client Client, c *containers.Container, s *Spec, opts ...SpecOpts) error { for _, o := range opts { @@ -141,7 +141,6 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { Path: defaultRootfsPath, }, Process: &specs.Process{ - Env: defaultUnixEnv, Cwd: "/", NoNewPrivileges: true, User: specs.User{ diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index ce756108adbce..ad6b52a9f5107 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -17,6 +17,7 @@ package oci import ( + "bufio" "context" "encoding/json" "fmt" @@ -76,6 +77,20 @@ func setLinux(s *Spec) { } } +// nolint +func setResources(s *Spec) { + if s.Linux != nil { + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + } + if s.Windows != nil { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + } +} + // setCapabilities sets Linux Capabilities to empty if unset func setCapabilities(s *Spec) { setProcess(s) @@ -104,7 +119,7 @@ func WithDefaultSpecForPlatform(platform string) SpecOpts { } } -// WithSpecFromBytes loads the the spec from the provided byte slice. +// WithSpecFromBytes loads the spec from the provided byte slice. func WithSpecFromBytes(p []byte) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { *s = Spec{} // make sure spec is cleared. @@ -137,6 +152,13 @@ func WithEnv(environmentVariables []string) SpecOpts { } } +// WithDefaultPathEnv sets the $PATH environment variable to the +// default PATH defined in this package. +func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv) + return nil +} + // replaceOrAppendEnvValues returns the defaults with the overrides either // replaced by env key or appended to the list func replaceOrAppendEnvValues(defaults, overrides []string) []string { @@ -312,7 +334,11 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { setProcess(s) if s.Linux != nil { - s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env) + defaults := config.Env + if len(defaults) == 0 { + defaults = defaultUnixEnv + } + s.Process.Env = replaceOrAppendEnvValues(defaults, s.Process.Env) cmd := config.Cmd if len(args) > 0 { cmd = args @@ -334,7 +360,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { // even if there is no specified user in the image config return WithAdditionalGIDs("root")(ctx, client, c, s) } else if s.Windows != nil { - s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env) + s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env) cmd := config.Cmd if len(args) > 0 { cmd = args @@ -607,7 +633,7 @@ func WithUserID(uid uint32) SpecOpts { } // WithUsername sets the correct UID and GID for the container -// based on the the image's /etc/passwd contents. If /etc/passwd +// based on the image's /etc/passwd contents. If /etc/passwd // does not exist, or the username is not found in /etc/passwd, // it returns error. 
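// Illustrative sketch, not part of the vendored patch: the options added to
// this file (WithLinuxDevice, WithEnvFile and WithDevShmSize below, plus the
// default PATH handling above) compose like any other SpecOpts when building
// a container spec. The image variable, device path, env file and container
// ID are placeholders.
//
//	container, err := client.NewContainer(ctx, "example",
//		containerd.WithNewSpec(
//			oci.WithImageConfig(image),
//			oci.WithLinuxDevice("/dev/fuse", "rwm"),
//			oci.WithEnvFile("/etc/example/app.env"),
//			oci.WithDevShmSize(64*1024), // 64 MB; the value is in KB
//		),
//	)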
func WithUsername(username string) SpecOpts { @@ -1139,3 +1165,85 @@ func WithAnnotations(annotations map[string]string) SpecOpts { return nil } } + +// WithLinuxDevices adds the provided linux devices to the spec +func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.Devices = append(s.Linux.Devices, devices...) + return nil + } +} + +var ErrNotADevice = errors.New("not a device node") + +// WithLinuxDevice adds the device specified by path to the spec +func WithLinuxDevice(path, permissions string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + setResources(s) + + dev, err := deviceFromPath(path, permissions) + if err != nil { + return err + } + + s.Linux.Devices = append(s.Linux.Devices, *dev) + + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{ + Type: dev.Type, + Allow: true, + Major: &dev.Major, + Minor: &dev.Minor, + Access: permissions, + }) + + return nil + } +} + +// WithEnvFile adds environment variables from a file to the container's spec +func WithEnvFile(path string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + var vars []string + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + if sc.Err() != nil { + return sc.Err() + } + vars = append(vars, sc.Text()) + } + return WithEnv(vars)(nil, nil, nil, s) + } +} + +// ErrNoShmMount is returned when there is no /dev/shm mount specified in the config +// and an Opts was trying to set a configuration value on the mount. +var ErrNoShmMount = errors.New("no /dev/shm mount specified") + +// WithDevShmSize sets the size of the /dev/shm mount for the container. +// +// The size value is specified in kb, kilobytes. +func WithDevShmSize(kb int64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + for _, m := range s.Mounts { + if m.Source == "shm" && m.Type == "tmpfs" { + for i, o := range m.Options { + if strings.HasPrefix(o, "size=") { + m.Options[i] = fmt.Sprintf("size=%dk", kb) + return nil + } + } + m.Options = append(m.Options, fmt.Sprintf("size=%dk", kb)) + return nil + } + } + return ErrNoShmMount + } +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go new file mode 100644 index 0000000000000..918c8f4ec3c53 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go @@ -0,0 +1,64 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + // The type is 32bit on mips. + devNumber = uint64(stat.Rdev) // nolint: unconvert + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go new file mode 100644 index 0000000000000..3f63dfd162bb5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -0,0 +1,63 @@ +// +build !linux,!windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + devNumber = uint64(stat.Rdev) + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index fbe1cb33c28b0..d265d544deff1 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -23,6 +23,7 @@ import ( "github.com/containerd/containerd/containers" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" ) // WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the @@ -65,3 +66,7 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { return nil } } + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + return nil, errors.New("device from path not supported on Windows") +} diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go index 766d34493445c..aa604baab9217 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go @@ -17,6 +17,7 @@ package dialer import ( + "context" "net" "time" @@ -28,8 +29,19 @@ type dialResult struct { err error } +// ContextDialer returns a GRPC net.Conn connected to the provided address +func ContextDialer(ctx context.Context, address string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return timeoutDialer(address, time.Until(deadline)) + } + return timeoutDialer(address, 0) +} + // Dialer returns a GRPC net.Conn connected to the provided address -func Dialer(address string, timeout time.Duration) (net.Conn, error) { +// Deprecated: use ContextDialer and grpc.WithContextDialer. +var Dialer = timeoutDialer + +func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) { var ( stopC = make(chan struct{}) synC = make(chan *dialResult) diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go b/vendor/github.com/containerd/containerd/pkg/process/deleted_state.go similarity index 95% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go rename to vendor/github.com/containerd/containerd/pkg/process/deleted_state.go index fe9d7bf554111..95ad138e062d1 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/deleted_state.go @@ -16,14 +16,13 @@ limitations under the License. 
*/ -package proc +package process import ( "context" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/runtime/proc" google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" ) @@ -67,6 +66,6 @@ func (s *deletedState) SetExited(status int) { // no op } -func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a deleted state") } diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go b/vendor/github.com/containerd/containerd/pkg/process/exec.go similarity index 91% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go rename to vendor/github.com/containerd/containerd/pkg/process/exec.go index 5ab232ae7df51..4175dcd5a4ee3 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go +++ b/vendor/github.com/containerd/containerd/pkg/process/exec.go @@ -16,7 +16,7 @@ limitations under the License. */ -package proc +package process import ( "context" @@ -31,7 +31,8 @@ import ( "golang.org/x/sys/unix" "github.com/containerd/console" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -49,10 +50,10 @@ type execProcess struct { io *processIO status int exited time.Time - pid *safePid + pid safePid closers []io.Closer stdin io.Closer - stdio proc.Stdio + stdio stdio.Stdio path string spec specs.Process @@ -95,6 +96,7 @@ func (e *execProcess) setExited(status int) { e.status = status e.exited = time.Now() e.parent.Platform.ShutdownConsole(context.Background(), e.console) + e.pid.set(StoppedPID) close(e.waitBlock) } @@ -106,7 +108,7 @@ func (e *execProcess) Delete(ctx context.Context) error { } func (e *execProcess) delete(ctx context.Context) error { - e.wg.Wait() + waitTimeout(ctx, &e.wg, 2*time.Second) if e.io != nil { for _, c := range e.closers { c.Close() @@ -142,7 +144,12 @@ func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error { func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { pid := e.pid.get() - if pid != 0 { + switch { + case pid == 0: + return errors.Wrap(errdefs.ErrFailedPrecondition, "process not created") + case pid < 0: + return errors.Wrapf(errdefs.ErrNotFound, "process already finished") + default: if err := unix.Kill(pid, syscall.Signal(sig)); err != nil { return errors.Wrapf(checkKillError(err), "exec kill error") } @@ -154,7 +161,7 @@ func (e *execProcess) Stdin() io.Closer { return e.stdin } -func (e *execProcess) Stdio() proc.Stdio { +func (e *execProcess) Stdio() stdio.Stdio { return e.stdio } @@ -254,10 +261,13 @@ func (e *execProcess) Status(ctx context.Context) (string, error) { } e.mu.Lock() defer e.mu.Unlock() - // if we don't have a pid then the exec process has just been created + // if we don't have a pid(pid=0) then the exec process has just been created if e.pid.get() == 0 { return "created", nil } + if e.pid.get() == StoppedPID { + return "stopped", nil + } // if we have a pid and it can be signaled, the process is running if err := unix.Kill(e.pid.get(), 0); err == nil { return "running", nil diff --git 
a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go b/vendor/github.com/containerd/containerd/pkg/process/exec_state.go similarity index 99% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go rename to vendor/github.com/containerd/containerd/pkg/process/exec_state.go index 12489501ba880..a8b44bb8bcddd 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/exec_state.go @@ -16,7 +16,7 @@ limitations under the License. */ -package proc +package process import ( "context" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go b/vendor/github.com/containerd/containerd/pkg/process/init.go similarity index 95% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go rename to vendor/github.com/containerd/containerd/pkg/process/init.go index 10787ed878296..7861bdd8b6863 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go +++ b/vendor/github.com/containerd/containerd/pkg/process/init.go @@ -16,7 +16,7 @@ limitations under the License. */ -package proc +package process import ( "context" @@ -33,7 +33,7 @@ import ( "github.com/containerd/console" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" @@ -59,15 +59,15 @@ type Init struct { id string Bundle string console console.Console - Platform proc.Platform + Platform stdio.Platform io *processIO runtime *runc.Runc status int exited time.Time - pid int + pid safePid closers []io.Closer stdin io.Closer - stdio proc.Stdio + stdio stdio.Stdio Rootfs string IoUID int IoGID int @@ -93,7 +93,7 @@ func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Ru } // New returns a new process -func New(id string, runtime *runc.Runc, stdio proc.Stdio) *Init { +func New(id string, runtime *runc.Runc, stdio stdio.Stdio) *Init { p := &Init{ id: id, runtime: runtime, @@ -113,6 +113,9 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { pio *processIO pidFile = newPidFile(p.Bundle) ) + p.pid.Lock() + defer p.pid.Unlock() + if r.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { return errors.Wrap(err, "failed to create OCI runtime console socket") @@ -167,7 +170,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { if err != nil { return errors.Wrap(err, "failed to retrieve OCI runtime container pid") } - p.pid = pid + p.pid.pid = pid return nil } @@ -213,7 +216,7 @@ func (p *Init) ID() string { // Pid of the process func (p *Init) Pid() int { - return p.pid + return p.pid.get() } // ExitStatus of the process @@ -272,6 +275,7 @@ func (p *Init) setExited(status int) { p.exited = time.Now() p.status = status p.Platform.ShutdownConsole(context.Background(), p.console) + p.pid.set(StoppedPID) close(p.waitBlock) } @@ -284,7 +288,7 @@ func (p *Init) Delete(ctx context.Context) error { } func (p *Init) delete(ctx context.Context) error { - p.wg.Wait() + waitTimeout(ctx, &p.wg, 2*time.Second) err := p.runtime.Delete(ctx, p.id, nil) // ignore errors if a runtime has already deleted the process // but we still hold metadata and pipes @@ -324,13 +328,6 @@ func (p *Init) 
Resize(ws console.WinSize) error { return p.console.Resize(ws) } -func (p *Init) resize(ws console.WinSize) error { - if p.console == nil { - return nil - } - return p.console.Resize(ws) -} - // Pause the init process and all its child processes func (p *Init) Pause(ctx context.Context) error { p.mu.Lock() @@ -384,7 +381,7 @@ func (p *Init) Runtime() *runc.Runc { } // Exec returns a new child process -func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { p.mu.Lock() defer p.mu.Unlock() @@ -392,7 +389,7 @@ func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce } // exec returns a new exec'd process -func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { // process exec request var spec specs.Process if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { @@ -405,14 +402,13 @@ func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce path: path, parent: p, spec: spec, - stdio: proc.Stdio{ + stdio: stdio.Stdio{ Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, Terminal: r.Terminal, }, waitBlock: make(chan struct{}), - pid: &safePid{}, } e.execState = &execCreatedState{p: e} return e, nil @@ -472,7 +468,7 @@ func (p *Init) update(ctx context.Context, r *google_protobuf.Any) error { } // Stdio of the process -func (p *Init) Stdio() proc.Stdio { +func (p *Init) Stdio() stdio.Stdio { return p.stdio } @@ -492,7 +488,7 @@ func (p *Init) runtimeError(rErr error, msg string) error { } } -func withConditionalIO(c proc.Stdio) runc.IOOpt { +func withConditionalIO(c stdio.Stdio) runc.IOOpt { return func(o *runc.IOOption) { o.OpenStdin = c.Stdin != "" o.OpenStdout = c.Stdout != "" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go b/vendor/github.com/containerd/containerd/pkg/process/init_state.go similarity index 92% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go rename to vendor/github.com/containerd/containerd/pkg/process/init_state.go index 51849c62b4f62..9ec1d17be0e48 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/init_state.go @@ -16,13 +16,11 @@ limitations under the License. 
*/ -package proc +package process import ( "context" - "github.com/containerd/console" - "github.com/containerd/containerd/runtime/proc" runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" @@ -30,14 +28,13 @@ import ( ) type initState interface { - Resize(console.WinSize) error Start(context.Context) error Delete(context.Context) error Pause(context.Context) error Resume(context.Context) error Update(context.Context, *google_protobuf.Any) error Checkpoint(context.Context, *CheckpointConfig) error - Exec(context.Context, string, *ExecConfig) (proc.Process, error) + Exec(context.Context, string, *ExecConfig) (Process, error) Kill(context.Context, uint32, bool) error SetExited(int) } @@ -76,10 +73,6 @@ func (s *createdState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro return errors.Errorf("cannot checkpoint a task in created state") } -func (s *createdState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *createdState) Start(ctx context.Context) error { if err := s.p.start(ctx); err != nil { return err @@ -106,7 +99,7 @@ func (s *createdState) SetExited(status int) { } } -func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return s.p.exec(ctx, path, r) } @@ -145,14 +138,13 @@ func (s *createdCheckpointState) Checkpoint(ctx context.Context, r *CheckpointCo return errors.Errorf("cannot checkpoint a task in created state") } -func (s *createdCheckpointState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *createdCheckpointState) Start(ctx context.Context) error { p := s.p sio := p.stdio + p.pid.Lock() + defer p.pid.Unlock() + var ( err error socket *runc.Socket @@ -192,7 +184,7 @@ func (s *createdCheckpointState) Start(ctx context.Context) error { if err != nil { return errors.Wrap(err, "failed to retrieve OCI runtime container pid") } - p.pid = pid + p.pid.pid = pid return s.transition("running") } @@ -215,7 +207,7 @@ func (s *createdCheckpointState) SetExited(status int) { } } -func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a created state") } @@ -255,10 +247,6 @@ func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro return s.p.checkpoint(ctx, r) } -func (s *runningState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *runningState) Start(ctx context.Context) error { return errors.Errorf("cannot start a running process") } @@ -279,7 +267,7 @@ func (s *runningState) SetExited(status int) { } } -func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return s.p.exec(ctx, path, r) } @@ -319,10 +307,6 @@ func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error return s.p.checkpoint(ctx, r) } -func (s *pausedState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *pausedState) Start(ctx context.Context) error { return errors.Errorf("cannot start a paused process") } @@ -347,7 +331,7 @@ func (s *pausedState) SetExited(status int) { } } -func (s *pausedState) Exec(ctx 
context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a paused state") } @@ -381,10 +365,6 @@ func (s *stoppedState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro return errors.Errorf("cannot checkpoint a stopped container") } -func (s *stoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") -} - func (s *stoppedState) Start(ctx context.Context) error { return errors.Errorf("cannot start a stopped process") } @@ -404,6 +384,6 @@ func (s *stoppedState) SetExited(status int) { // no op } -func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a stopped state") } diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go b/vendor/github.com/containerd/containerd/pkg/process/io.go similarity index 89% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go rename to vendor/github.com/containerd/containerd/pkg/process/io.go index 0096db7169fd7..169f6c8e2072f 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go +++ b/vendor/github.com/containerd/containerd/pkg/process/io.go @@ -16,7 +16,7 @@ limitations under the License. */ -package proc +package process import ( "context" @@ -32,7 +32,7 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" "github.com/pkg/errors" @@ -50,7 +50,7 @@ type processIO struct { uri *url.URL copy bool - stdio proc.Stdio + stdio stdio.Stdio } func (p *processIO) Close() error { @@ -76,7 +76,7 @@ func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error { return nil } -func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio) (*processIO, error) { +func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio stdio.Stdio) (*processIO, error) { pio := &processIO{ stdio: stdio, } @@ -101,17 +101,20 @@ func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio pio.copy = true pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) case "binary": - pio.io, err = newBinaryIO(ctx, id, u) + pio.io, err = NewBinaryIO(ctx, id, u) case "file": - if err := os.MkdirAll(filepath.Dir(u.Host), 0755); err != nil { + filePath := u.Path + if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil { return nil, err } var f *os.File - f, err = os.OpenFile(u.Host, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + f, err = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return nil, err } f.Close() + pio.stdio.Stdout = filePath + pio.stdio.Stderr = filePath pio.copy = true pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) default: @@ -179,10 +182,10 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w ) if ok { if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening w/o fifo %q 
failed", i.name) } if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening r/o fifo %q failed", i.name) } } else { if sameFile != nil { @@ -191,7 +194,7 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w continue } if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening file %q failed", i.name) } if stdout == stderr { sameFile = &countingWriteCloser{ @@ -251,7 +254,8 @@ func isFifo(path string) (bool, error) { return false, nil } -func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) { +// NewBinaryIO runs a custom binary process for pluggable shim logging +func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -264,7 +268,7 @@ func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) } } ctx, cancel := context.WithCancel(ctx) - cmd := exec.CommandContext(ctx, uri.Host, args...) + cmd := exec.CommandContext(ctx, uri.Path, args...) cmd.Env = append(cmd.Env, "CONTAINER_ID="+id, "CONTAINER_NAMESPACE="+ns, diff --git a/vendor/github.com/containerd/containerd/runtime/proc/proc.go b/vendor/github.com/containerd/containerd/pkg/process/process.go similarity index 70% rename from vendor/github.com/containerd/containerd/runtime/proc/proc.go rename to vendor/github.com/containerd/containerd/pkg/process/process.go index 0e8d21b749367..7cebb9b309072 100644 --- a/vendor/github.com/containerd/containerd/runtime/proc/proc.go +++ b/vendor/github.com/containerd/containerd/pkg/process/process.go @@ -14,30 +14,17 @@ limitations under the License. 
*/ -package proc +package process import ( "context" "io" - "sync" "time" "github.com/containerd/console" + "github.com/containerd/containerd/pkg/stdio" ) -// Stdio of a process -type Stdio struct { - Stdin string - Stdout string - Stderr string - Terminal bool -} - -// IsNull returns true if the stdio is not defined -func (s Stdio) IsNull() bool { - return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" -} - // Process on a system type Process interface { // ID returns the id for the process @@ -51,7 +38,7 @@ type Process interface { // Stdin returns the process STDIN Stdin() io.Closer // Stdio returns io information for the container - Stdio() Stdio + Stdio() stdio.Stdio // Status returns the process status Status(context.Context) (string, error) // Wait blocks until the process has exited @@ -67,12 +54,3 @@ type Process interface { // SetExited sets the exit status for the process SetExited(status int) } - -// Platform handles platform-specific behavior that may differs across -// platform implementations -type Platform interface { - CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, - wg *sync.WaitGroup) (console.Console, error) - ShutdownConsole(ctx context.Context, console console.Console) error - Close() error -} diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go b/vendor/github.com/containerd/containerd/pkg/process/types.go similarity index 99% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go rename to vendor/github.com/containerd/containerd/pkg/process/types.go index 5d705c030f26d..03477038ab97f 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go +++ b/vendor/github.com/containerd/containerd/pkg/process/types.go @@ -14,7 +14,7 @@ limitations under the License. */ -package proc +package process import ( google_protobuf "github.com/gogo/protobuf/types" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go b/vendor/github.com/containerd/containerd/pkg/process/utils.go similarity index 68% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go rename to vendor/github.com/containerd/containerd/pkg/process/utils.go index 75927a4ef4017..6de2f7a12cb79 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go +++ b/vendor/github.com/containerd/containerd/pkg/process/utils.go @@ -16,9 +16,10 @@ limitations under the License. */ -package proc +package process import ( + "context" "encoding/json" "fmt" "io" @@ -34,6 +35,15 @@ import ( "golang.org/x/sys/unix" ) +const ( + // RuncRoot is the path to the root runc state directory + RuncRoot = "/run/containerd/runc" + // StoppedPID is the pid assigned after a container has run and stopped + StoppedPID = -1 + // InitPidFile name of the file that contains the init pid + InitPidFile = "init.pid" +) + // safePid is a thread safe wrapper for pid. type safePid struct { sync.Mutex @@ -46,6 +56,12 @@ func (s *safePid) get() int { return s.pid } +func (s *safePid) set(pid int) { + s.Lock() + s.pid = pid + s.Unlock() +} + // TODO(mlaventure): move to runc package? 
func getLastRuntimeError(r *runc.Runc) (string, error) { if r.Log == "" { @@ -56,6 +72,7 @@ func getLastRuntimeError(r *runc.Runc) (string, error) { if err != nil { return "", err } + defer f.Close() var ( errMsg string @@ -110,15 +127,13 @@ func checkKillError(err error) error { } if strings.Contains(err.Error(), "os: process already finished") || strings.Contains(err.Error(), "container not running") || + strings.Contains(strings.ToLower(err.Error()), "no such process") || err == unix.ESRCH { return errors.Wrapf(errdefs.ErrNotFound, "process already finished") } return errors.Wrapf(err, "unknown error after kill") } -// InitPidFile name of the file that contains the init pid -const InitPidFile = "init.pid" - func newPidFile(bundle string) *pidFile { return &pidFile{ path: filepath.Join(bundle, InitPidFile), @@ -142,3 +157,37 @@ func (p *pidFile) Path() string { func (p *pidFile) Read() (int, error) { return runc.ReadPidFile(p.path) } + +// waitTimeout handles waiting on a waitgroup with a specified timeout. +// this is commonly used for waiting on IO to finish after a process has exited +func waitTimeout(ctx context.Context, wg *sync.WaitGroup, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + done := make(chan struct{}, 1) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func stateName(v interface{}) string { + switch v.(type) { + case *runningState, *execRunningState: + return "running" + case *createdState, *execCreatedState, *createdCheckpointState: + return "created" + case *pausedState: + return "paused" + case *deletedState: + return "deleted" + case *stoppedState: + return "stopped" + } + panic(errors.Errorf("invalid state %v", v)) +} diff --git a/vendor/github.com/containerd/continuity/hardlinks_windows.go b/vendor/github.com/containerd/containerd/pkg/stdio/platform.go similarity index 58% rename from vendor/github.com/containerd/continuity/hardlinks_windows.go rename to vendor/github.com/containerd/containerd/pkg/stdio/platform.go index 5893f4e1ae26c..6e1b27cfad0f0 100644 --- a/vendor/github.com/containerd/continuity/hardlinks_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/stdio/platform.go @@ -14,15 +14,20 @@ limitations under the License. */ -package continuity +package stdio -import "os" +import ( + "context" + "sync" -type hardlinkKey struct{} + "github.com/containerd/console" +) -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - // NOTE(stevvooe): Obviously, this is not yet implemented. However, the - // makings of an implementation are available in src/os/types_windows.go. More - // investigation needs to be done to figure out exactly how to do this. 
- return hardlinkKey{}, errNotAHardLink +// Platform handles platform-specific behavior that may differs across +// platform implementations +type Platform interface { + CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, + wg *sync.WaitGroup) (console.Console, error) + ShutdownConsole(ctx context.Context, console console.Console) error + Close() error } diff --git a/vendor/github.com/containerd/continuity/resource_windows.go b/vendor/github.com/containerd/containerd/pkg/stdio/stdio.go similarity index 68% rename from vendor/github.com/containerd/continuity/resource_windows.go rename to vendor/github.com/containerd/containerd/pkg/stdio/stdio.go index f9801801cfc9b..b02e77dcdd7dd 100644 --- a/vendor/github.com/containerd/continuity/resource_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/stdio/stdio.go @@ -14,15 +14,17 @@ limitations under the License. */ -package continuity +package stdio -import "os" +// Stdio of a process +type Stdio struct { + Stdin string + Stdout string + Stderr string + Terminal bool +} -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - }, nil +// IsNull returns true if the stdio is not defined +func (s Stdio) IsNull() bool { + return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" } diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go index 8259bbc851c48..3ad22a10d0ce1 100644 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -29,11 +29,48 @@ type MatchComparer interface { // Only returns a match comparer for a single platform // using default resolution logic for the platform. 
// +// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes) // For ARMv7, will also match ARMv6 and ARMv5 // For ARMv6, will also match ARMv5 func Only(platform specs.Platform) MatchComparer { platform = Normalize(platform) if platform.Architecture == "arm" { + if platform.Variant == "v8" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v7", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } if platform.Variant == "v7" { return orderedPlatformComparer{ matchers: []Matcher{ diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index bf6476b6419a4..69b336d67f772 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -97,7 +97,7 @@ func getCPUVariant() string { } switch variant { - case "8": + case "8", "AArch64": variant = "v8" case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": variant = "v7" diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index 8e85448ed028c..6ede94061eb88 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -28,7 +28,7 @@ func isLinuxOS(os string) bool { return os == "linux" } -// These function are generated from from https://golang.org/src/go/build/syslist.go. +// These function are generated from https://golang.org/src/go/build/syslist.go. // // We use switch statements because they are slightly faster than map lookups // and use a little less memory. @@ -38,7 +38,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. func isKnownOS(os string) bool { switch os { - case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +60,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. 
func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 2c2cc1102e189..d2b73ac3d3996 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -130,7 +130,7 @@ type Matcher interface { // specification. The returned matcher only looks for equality based on os, // architecture and variant. // -// One may implement their own matcher if this doesn't provide the the required +// One may implement their own matcher if this doesn't provide the required // functionality. // // Applications should opt to use `Match` over directly parsing specifiers. diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go index 1211c907ef671..75b7366fca36e 100644 --- a/vendor/github.com/containerd/containerd/plugin/context.go +++ b/vendor/github.com/containerd/containerd/plugin/context.go @@ -28,12 +28,13 @@ import ( // InitContext is used for plugin inititalization type InitContext struct { - Context context.Context - Root string - State string - Config interface{} - Address string - Events *exchange.Exchange + Context context.Context + Root string + State string + Config interface{} + Address string + TTRPCAddress string + Events *exchange.Exchange Meta *Meta // plugins can fill in metadata at init. diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index 9ae8bbeb5ff68..c7d2724140538 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -30,7 +30,8 @@ var ( ErrNoType = errors.New("plugin: no type") // ErrNoPluginID is returned when no id is specified ErrNoPluginID = errors.New("plugin: no id") - + // ErrIDRegistered is returned when a duplicate id is already registered + ErrIDRegistered = errors.New("plugin: id already registered") // ErrSkipPlugin is used when a plugin is not initialized and should not be loaded, // this allows the plugin loader differentiate between a plugin which is configured // not to load and one that fails to load. @@ -100,6 +101,8 @@ type Registration struct { // context are passed in. The init function may modify the registration to // add exports, capabilities and platform support declarations. 
InitFn func(*InitContext) (interface{}, error) + // Disable the plugin from loading + Disable bool } // Init the registered plugin @@ -157,12 +160,16 @@ func Load(path string) (err error) { func Register(r *Registration) { register.Lock() defer register.Unlock() + if r.Type == "" { panic(ErrNoType) } if r.ID == "" { panic(ErrNoPluginID) } + if err := checkUnique(r); err != nil { + panic(err) + } var last bool for _, requires := range r.Requires { @@ -177,24 +184,36 @@ func Register(r *Registration) { register.r = append(register.r, r) } +func checkUnique(r *Registration) error { + for _, registered := range register.r { + if r.URI() == registered.URI() { + return errors.Wrap(ErrIDRegistered, r.URI()) + } + } + return nil +} + +// DisableFilter filters out disabled plugins +type DisableFilter func(r *Registration) bool + // Graph returns an ordered list of registered plugins for initialization. // Plugins in disableList specified by id will be disabled. -func Graph(disableList []string) (ordered []*Registration) { +func Graph(filter DisableFilter) (ordered []*Registration) { register.RLock() defer register.RUnlock() - for _, d := range disableList { - for i, r := range register.r { - if r.ID == d { - register.r = append(register.r[:i], register.r[i+1:]...) - break - } + + for _, r := range register.r { + if filter(r) { + r.Disable = true } } added := map[*Registration]bool{} for _, r := range register.r { - - children(r.ID, r.Requires, added, &ordered) + if r.Disable { + continue + } + children(r, added, &ordered) if !added[r] { ordered = append(ordered, r) added[r] = true @@ -203,11 +222,13 @@ func Graph(disableList []string) (ordered []*Registration) { return ordered } -func children(id string, types []Type, added map[*Registration]bool, ordered *[]*Registration) { - for _, t := range types { +func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) { + for _, t := range reg.Requires { for _, r := range register.r { - if r.ID != id && (t == "*" || r.Type == t) { - children(r.ID, r.Requires, added, ordered) + if !r.Disable && + r.URI() != reg.URI() && + (t == "*" || r.Type == t) { + children(r, added, ordered) if !added[r] { *ordered = append(*ordered, r) added[r] = true diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index 14732d99b6c65..5b302569babe8 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -44,7 +44,7 @@ type Process interface { Wait(context.Context) (<-chan ExitStatus, error) // CloseIO allows various pipes to be closed on the process CloseIO(context.Context, ...IOCloserOpts) error - // Resize changes the width and heigh of the process's terminal + // Resize changes the width and height of the process's terminal Resize(ctx context.Context, w, h uint32) error // IO returns the io set for the process IO() cio.IO @@ -61,7 +61,7 @@ func NewExitStatus(code uint32, t time.Time, err error) *ExitStatus { } } -// ExitStatus encapsulates a process' exit status. +// ExitStatus encapsulates a process's exit status. 
// It is used by `Wait()` to return either a process exit code or an error type ExitStatus struct { code uint32 diff --git a/vendor/github.com/containerd/containerd/pull.go b/vendor/github.com/containerd/containerd/pull.go index 693dcafe1da21..2520639dff569 100644 --- a/vendor/github.com/containerd/containerd/pull.go +++ b/vendor/github.com/containerd/containerd/pull.go @@ -32,7 +32,7 @@ import ( // Pull downloads the provided content into containerd's content store // and returns a platform specific image object -func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) { +func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) { pullCtx := defaultRemoteContext() for _, o := range opts { if err := o(c, pullCtx); err != nil { @@ -44,7 +44,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image if len(pullCtx.Platforms) > 1 { return nil, errors.New("cannot pull multiplatform image locally, try Fetch") } else if len(pullCtx.Platforms) == 0 { - pullCtx.PlatformMatcher = platforms.Default() + pullCtx.PlatformMatcher = c.platform } else { p, err := platforms.Parse(pullCtx.Platforms[0]) if err != nil { @@ -61,6 +61,30 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image } defer done(ctx) + var unpacks int32 + if pullCtx.Unpack { + // unpacker only supports schema 2 image, for schema 1 this is noop. + u, err := c.newUnpacker(ctx, pullCtx) + if err != nil { + return nil, errors.Wrap(err, "create unpacker") + } + unpackWrapper, eg := u.handlerWrapper(ctx, &unpacks) + defer func() { + if err := eg.Wait(); err != nil { + if retErr == nil { + retErr = errors.Wrap(err, "unpack") + } + } + }() + wrapper := pullCtx.HandlerWrapper + pullCtx.HandlerWrapper = func(h images.Handler) images.Handler { + if wrapper == nil { + return unpackWrapper(h) + } + return wrapper(unpackWrapper(h)) + } + } + img, err := c.fetch(ctx, pullCtx, ref, 1) if err != nil { return nil, err @@ -69,8 +93,12 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher) if pullCtx.Unpack { - if err := i.Unpack(ctx, pullCtx.Snapshotter); err != nil { - return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) + if unpacks == 0 { + // Try to unpack is none is done previously. + // This is at least required for schema 1 image. + if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { + return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) + } } } @@ -112,9 +140,14 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim childrenHandler := images.ChildrenHandler(store) // Set any children labels for that content childrenHandler = images.SetChildrenLabels(store, childrenHandler) - // Filter manifests by platforms but allow to handle manifest - // and configuration for not-target platforms - childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) + if rCtx.AllMetadata { + // Filter manifests by platforms but allow to handle manifest + // and configuration for not-target platforms + childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) + } else { + // Filter children by platforms if specified. 
+ childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher) + } // Sort and limit manifests if a finite number is needed if limit > 0 { childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit) @@ -131,22 +164,18 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim }, ) + appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref) + if err != nil { + return images.Image{}, err + } + handlers := append(rCtx.BaseHandlers, remotes.FetchHandler(store, fetcher), convertibleHandler, childrenHandler, + appendDistSrcLabelHandler, ) - // append distribution source label to blob data - if rCtx.AppendDistributionSourceLabel { - appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref) - if err != nil { - return images.Image{}, err - } - - handlers = append(handlers, appendDistSrcLabelHandler) - } - handler = images.Handlers(handlers...) converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) { diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 73adb5a2f16f9..9652d3ac1b3e8 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -40,126 +40,278 @@ type dockerAuthorizer struct { credentials func(string) (string, string, error) client *http.Client + header http.Header mu sync.Mutex - auth map[string]string + // indexed by host name + handlers map[string]*authHandler } // NewAuthorizer creates a Docker authorizer using the provided function to // get credentials for the token server or basic auth. +// Deprecated: Use NewDockerAuthorizer func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - if client == nil { - client = http.DefaultClient + return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client } +} + +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. +// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) + } + + if ao.client == nil { + ao.client = http.DefaultClient + } + return &dockerAuthorizer{ - credentials: f, - client: client, - auth: map[string]string{}, + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), } } +// Authorize handles auth request. 
func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { - // TODO: Lookup matching challenge and scope rather than just host - if auth := a.getAuth(req.URL.Host); auth != "" { - req.Header.Set("Authorization", auth) + // skip if there is no auth handler + ah := a.getAuthHandler(req.URL.Host) + if ah == nil { + return nil } + auth, err := ah.authorize(ctx) + if err != nil { + return err + } + + req.Header.Set("Authorization", auth) return nil } +func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { + a.mu.Lock() + defer a.mu.Unlock() + + return a.handlers[host] +} + func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { last := responses[len(responses)-1] host := last.Request.URL.Host + + a.mu.Lock() + defer a.mu.Unlock() for _, c := range parseAuthHeader(last.Header) { if c.scheme == bearerAuth { if err := invalidAuthorization(c, responses); err != nil { - // TODO: Clear token - a.setAuth(host, "") + delete(a.handlers, host) return err } - // TODO(dmcg): Store challenge, not token - // Move token fetching to authorize - return a.setTokenAuth(ctx, host, c.parameters) + // reuse existing handler + // + // assume that one registry will return the common + // challenge information, including realm and service. + // and the resource scope is only different part + // which can be provided by each request. + if _, ok := a.handlers[host]; ok { + return nil + } + + common, err := a.generateTokenOptions(ctx, host, c) + if err != nil { + return err + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + return nil } else if c.scheme == basicAuth && a.credentials != nil { - // TODO: Resolve credentials on authorize username, secret, err := a.credentials(host) if err != nil { return err } + if username != "" && secret != "" { - auth := username + ":" + secret - a.setAuth(host, fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(auth)))) + common := tokenOptions{ + username: username, + secret: secret, + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) return nil } } } - return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") } -func (a *dockerAuthorizer) getAuth(host string) string { - a.mu.Lock() - defer a.mu.Unlock() - - return a.auth[host] -} - -func (a *dockerAuthorizer) setAuth(host string, auth string) bool { - a.mu.Lock() - defer a.mu.Unlock() - - changed := a.auth[host] != auth - a.auth[host] = auth - - return changed -} - -func (a *dockerAuthorizer) setTokenAuth(ctx context.Context, host string, params map[string]string) error { - realm, ok := params["realm"] +func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, error) { + realm, ok := c.parameters["realm"] if !ok { - return errors.New("no realm specified for token auth challenge") + return tokenOptions{}, errors.New("no realm specified for token auth challenge") } realmURL, err := url.Parse(realm) if err != nil { - return errors.Wrap(err, "invalid token auth challenge realm") + return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") } to := tokenOptions{ realm: realmURL.String(), - service: params["service"], + service: c.parameters["service"], } - to.scopes = getTokenScopes(ctx, params) - if len(to.scopes) == 0 { - return errors.Errorf("no scope specified for token auth challenge") + scope, ok := c.parameters["scope"] + if !ok { + return tokenOptions{}, errors.Errorf("no scope 
specified for token auth challenge") } + to.scopes = append(to.scopes, scope) if a.credentials != nil { to.username, to.secret, err = a.credentials(host) if err != nil { - return err + return tokenOptions{}, err } } + return to, nil +} + +// authResult is used to control limit rate. +type authResult struct { + sync.WaitGroup + token string + err error +} + +// authHandler is used to handle auth request per registry server. +type authHandler struct { + sync.Mutex + + header http.Header + + client *http.Client + + // only support basic and bearer schemes + scheme authenticationScheme + + // common contains common challenge answer + common tokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult +} + +func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler { + return &authHandler{ + header: hdr, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + } +} + +func (ah *authHandler) authorize(ctx context.Context) (string, error) { + switch ah.scheme { + case basicAuth: + return ah.doBasicAuth(ctx) + case bearerAuth: + return ah.doBearerAuth(ctx) + default: + return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { + username, secret := ah.common.username, ah.common.secret + + if username == "" || secret == "" { + return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), nil +} - var token string +func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { + // copy common tokenOptions + to := ah.common + + to.scopes = getTokenScopes(ctx, to.scopes) + if len(to.scopes) == 0 { + return "", errors.Errorf("no scope specified for token auth challenge") + } + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.scopes, " ") + + ah.Lock() + if r, exist := ah.scopedTokens[scoped]; exist { + ah.Unlock() + r.Wait() + return r.token, r.err + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + // fetch token for the resource scope + var ( + token string + err error + ) if to.secret != "" { - // Credential information is provided, use oauth POST endpoint - token, err = a.fetchTokenWithOAuth(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch oauth token") - } + // credential information is provided, use oauth POST endpoint + token, err = ah.fetchTokenWithOAuth(ctx, to) + err = errors.Wrap(err, "failed to fetch oauth token") } else { - // Do request anonymously - token, err = a.fetchToken(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch anonymous token") - } + // do request anonymously + token, err = ah.fetchToken(ctx, to) + err = errors.Wrap(err, "failed to fetch anonymous token") } - a.setAuth(host, fmt.Sprintf("Bearer %s", token)) + token = fmt.Sprintf("Bearer %s", token) - return nil + r.token, r.err = token, err + r.Done() + return r.token, r.err } type tokenOptions struct { @@ -178,7 +330,7 @@ type postTokenResponse struct { Scope string `json:"scope"` } -func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { +func (ah *authHandler) fetchTokenWithOAuth(ctx 
context.Context, to tokenOptions) (string, error) { form := url.Values{} form.Set("scope", strings.Join(to.scopes, " ")) form.Set("service", to.service) @@ -194,11 +346,18 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti form.Set("password", to.secret) } - resp, err := ctxhttp.Post( - ctx, a.client, to.realm, - "application/x-www-form-urlencoded; charset=utf-8", - strings.NewReader(form.Encode()), - ) + req, err := http.NewRequest("POST", to.realm, strings.NewReader(form.Encode())) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) + } + } + + resp, err := ctxhttp.Do(ctx, ah.client, req) if err != nil { return "", err } @@ -208,7 +367,7 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti // As of September 2017, GCR is known to return 404. // As of February 2018, JFrog Artifactory is known to return 401. if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { - return a.fetchToken(ctx, to) + return ah.fetchToken(ctx, to) } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB log.G(ctx).WithFields(logrus.Fields{ @@ -237,13 +396,19 @@ type getTokenResponse struct { RefreshToken string `json:"refresh_token"` } -// getToken fetches a token using a GET request -func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (string, error) { +// fetchToken fetches a token using a GET request +func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) { req, err := http.NewRequest("GET", to.realm, nil) if err != nil { return "", err } + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) 
+ } + } + reqParams := req.URL.Query() if to.service != "" { @@ -260,7 +425,7 @@ func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (str req.URL.RawQuery = reqParams.Encode() - resp, err := ctxhttp.Do(ctx, a.client, req) + resp, err := ctxhttp.Do(ctx, ah.client, req) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 6f06b0e50c850..ad8482fa39214 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -23,7 +23,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "github.com/containerd/containerd/errdefs" @@ -32,7 +32,6 @@ import ( "github.com/docker/distribution/registry/api/errcode" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type dockerFetcher struct { @@ -40,26 +39,46 @@ type dockerFetcher struct { } func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields( - logrus.Fields{ - "base": r.base.String(), - "digest": desc.Digest, - }, - )) - - urls, err := r.getV2URLPaths(ctx, desc) - if err != nil { - return nil, err + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") } - ctx, err = contextWithRepositoryScope(ctx, r.refspec, false) + ctx, err := contextWithRepositoryScope(ctx, r.refspec, false) if err != nil { return nil, err } return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { - for _, u := range urls { - rc, err := r.open(ctx, u, desc.MediaType, offset) + // firstly try fetch via external urls + for _, us := range desc.URLs { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) + + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debug("failed to parse") + continue + } + log.G(ctx).Debug("trying alternative url") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) if err != nil { if errdefs.IsNotFound(err) { continue // try one of the other urls. 
@@ -71,6 +90,44 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R return rc, nil } + // Try manifests endpoints for manifests types + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + images.MediaTypeDockerSchema1Manifest, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + } + + // Finally use blobs endpoints + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + return nil, errors.Wrapf(errdefs.ErrNotFound, "could not fetch content descriptor %v (%v) from remote", desc.Digest, desc.MediaType) @@ -78,22 +135,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R }) } -func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) { - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", ")) +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) if offset > 0 { // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints // will return the header without supporting the range. The content // range must always be checked. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) } - resp, err := r.doRequestWithRetries(ctx, req, nil) + resp, err := req.doWithRetries(ctx, nil) if err != nil { return nil, err } @@ -106,13 +158,13 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { - return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u) + return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) } var registryErr errcode.Errors if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) } - return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error()) + return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) } if offset > 0 { cr := resp.Header.Get("content-range") @@ -141,30 +193,3 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int return resp.Body, nil } - -// getV2URLPaths generates the candidate urls paths for the object based on the -// set of hints and the provided object id. URLs are returned in the order of -// most to least likely succeed. 
-func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) { - var urls []string - - if len(desc.URLs) > 0 { - // handle fetch via external urls. - for _, u := range desc.URLs { - log.G(ctx).WithField("url", u).Debug("adding alternative url") - urls = append(urls, u) - } - } - - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, - images.MediaTypeDockerSchema1Manifest, - ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - urls = append(urls, r.url(path.Join("manifests", desc.Digest.String()))) - } - - // always fallback to attempting to get the object out of the blobs store. - urls = append(urls, r.url(path.Join("blobs", desc.Digest.String()))) - - return urls, nil -} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/remotes/docker/handler.go index 1a355783b8953..529cfbc274bd2 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/handler.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/handler.go @@ -110,3 +110,45 @@ func appendDistributionSourceLabel(originLabel, repo string) string { func distributionSourceLabelKey(source string) string { return fmt.Sprintf("%s.%s", labelDistributionSource, source) } + +// selectRepositoryMountCandidate will select the repo which has longest +// common prefix components as the candidate. +func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + // NOTE: basically, it won't be error here + return "" + } + + source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") + repoLabel, ok := sources[distributionSourceLabelKey(source)] + if !ok || repoLabel == "" { + return "" + } + + n, match := 0, "" + components := strings.Split(target, "/") + for _, repo := range strings.Split(repoLabel, ",") { + // the target repo is not a candidate + if repo == target { + continue + } + + if l := commonPrefixComponents(components, repo); l >= n { + n, match = l, repo + } + } + return match +} + +func commonPrefixComponents(components []string, target string) int { + targetComponents := strings.Split(target, "/") + + i := 0 + for ; i < len(components) && i < len(targetComponents); i++ { + if components[i] != targetComponents[i] { + break + } + } + return i +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index c3c0923f08ac1..a96fe5a9561f9 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -21,7 +21,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "time" @@ -37,7 +37,7 @@ import ( type dockerPusher struct { *dockerBase - tag string + object string // TODO: namespace tracker tracker StatusTracker @@ -59,31 +59,32 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten return nil, errors.Wrap(err, "failed to get status") } + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts") + } + var ( isManifest bool - existCheck string + existCheck []string + host = hosts[0] ) switch desc.MediaType { case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, 
ocispec.MediaTypeImageIndex: isManifest = true - if p.tag == "" { - existCheck = path.Join("manifests", desc.Digest.String()) - } else { - existCheck = path.Join("manifests", p.tag) - } + existCheck = getManifestPath(p.object, desc.Digest) default: - existCheck = path.Join("blobs", desc.Digest.String()) + existCheck = []string{"blobs", desc.Digest.String()} } - req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil) - if err != nil { - return nil, err - } + req := p.request(host, http.MethodHead, existCheck...) + req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", ")) - req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) - resp, err := p.doRequestWithRetries(ctx, req, nil) + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) if err != nil { if errors.Cause(err) != ErrInvalidAuthorization { return nil, err @@ -92,7 +93,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } else { if resp.StatusCode == http.StatusOK { var exists bool - if isManifest && p.tag != "" { + if isManifest && existCheck[1] != desc.Digest.String() { dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) if dgstHeader == desc.Digest { exists = true @@ -116,67 +117,94 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } } - // TODO: Lookup related objects for cross repository push - if isManifest { - var putPath string - if p.tag != "" { - putPath = path.Join("manifests", p.tag) - } else { - putPath = path.Join("manifests", desc.Digest.String()) - } - - req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil) - if err != nil { - return nil, err - } - req.Header.Add("Content-Type", desc.MediaType) + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) + req.header.Add("Content-Type", desc.MediaType) } else { - // TODO: Do monolithic upload if size is small - // Start upload request - req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil) - if err != nil { - return nil, err + req = p.request(host, http.MethodPost, "blobs", "uploads/") + + var resp *http.Response + if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) + pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo) + + // NOTE: the fromRepo might be private repo and + // auth service still can grant token without error. + // but the post request will fail because of 401. + // + // for the private repo, we should remove mount-from + // query and send the request again. 
+ resp, err = preq.do(pctx) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusUnauthorized { + log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) + + resp.Body.Close() + resp = nil + } } - resp, err := p.doRequestWithRetries(ctx, req, nil) - if err != nil { - return nil, err + if resp == nil { + resp, err = req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } } switch resp.StatusCode { case http.StatusOK, http.StatusAccepted, http.StatusNoContent: + case http.StatusCreated: + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + }, + }) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) default: // TODO: log error return nil, errors.Errorf("unexpected response: %s", resp.Status) } - location := resp.Header.Get("Location") + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) // Support paths without host in location if strings.HasPrefix(location, "/") { - // Support location string containing path and query - qmIndex := strings.Index(location, "?") - if qmIndex > 0 { - u := p.base - u.Path = location[:qmIndex] - u.RawQuery = location[qmIndex+1:] - location = u.String() - } else { - u := p.base - u.Path = location - location = u.String() + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) } - } - req, err = http.NewRequest(http.MethodPut, location, nil) - if err != nil { - return nil, err + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") + + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil + } } - q := req.URL.Query() + q := lurl.Query() q.Add("digest", desc.Digest.String()) - req.URL.RawQuery = q.Encode() + req = p.request(lhost, http.MethodPut) + req.path = lurl.Path + "?" 
+ q.Encode() } p.tracker.SetStatus(ref, Status{ Status: content.Status{ @@ -191,13 +219,22 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten pr, pw := io.Pipe() respC := make(chan *http.Response, 1) + body := ioutil.NopCloser(pr) - req.Body = ioutil.NopCloser(pr) - req.ContentLength = desc.Size + req.body = func() (io.ReadCloser, error) { + if body == nil { + return nil, errors.New("cannot reuse body, request must be retried") + } + // Only use the body once since pipe cannot be seeked + ob := body + body = nil + return ob, nil + } + req.size = desc.Size go func() { defer close(respC) - resp, err = p.doRequest(ctx, req) + resp, err = req.do(ctx) if err != nil { pr.CloseWithError(err) return @@ -223,6 +260,25 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten }, nil } +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + type pushWriter struct { base *dockerBase ref string @@ -296,7 +352,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di } if size > 0 && size != status.Offset { - return errors.Errorf("unxpected size %d, expected %d", status.Offset, size) + return errors.Errorf("unexpected size %d, expected %d", status.Offset, size) } if expected == "" { @@ -320,3 +376,16 @@ func (pw *pushWriter) Truncate(size int64) error { // TODO: always error on manifest return errors.New("cannot truncate remote upload") } + +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req + + sep := "?" + if strings.Contains(creq.path, sep) { + sep = "&" + } + + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/remotes/docker/registry.go new file mode 100644 index 0000000000000..ae24f41e10511 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/registry.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "net/http" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. +// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). 
A public +// mirror should never be trusted to do a resolve action. +// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (i.e. search, catalog, remove) +) + +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of an distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not join configurations, as soon as a non-empty +// configuration is returned from a configuration function, it will be returned +// to the caller. +func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. +func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries. For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. 
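// Editor's sketch (not part of the vendored patch): one way a caller might use
// the RegistryHosts helpers introduced above. It assumes it lives in package
// docker with net/http imported; the function name and host parameter are
// invented. Registries returns the first non-empty configuration, so the
// default configuration below is only consulted for hosts that the first
// function does not handle.
func exampleRegistryHosts(insecureHost string) RegistryHosts {
	insecureOnly := func(host string) ([]RegistryHost, error) {
		if host != insecureHost {
			return nil, nil // empty result: defer to the next configuration
		}
		return []RegistryHost{{
			Client:       http.DefaultClient,
			Host:         host,
			Scheme:       "http", // plain HTTP for this one registry only
			Path:         "/v2",
			Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush,
		}}, nil
	}
	// Fallback: HTTPS, docker.io rewritten to registry-1.docker.io, and plain
	// HTTP only for localhost-style hosts.
	return Registries(insecureOnly, ConfigureDefaultRegistries(WithPlainHTTP(MatchLocalhost)))
}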
+// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. +func MatchLocalhost(host string) (bool, error) { + for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} { + if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') { + return true, nil + } + } + return host == "::1", nil + +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 00e1c85568d32..f126449c3e489 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -18,9 +18,10 @@ package docker import ( "context" + "fmt" "io" + "io/ioutil" "net/http" - "net/url" "path" "strings" @@ -46,6 +47,19 @@ var ( // ErrInvalidAuthorization is used when credentials are passed to a server but // those credentials are rejected. ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. + // + // NOTE: The max supported layers by some runtimes is 128 and individual + // layers will not contribute more than 256 bytes, making a + // reasonable limit for a large image manifests of 32K bytes. + // 4M bytes represents a much larger upper bound for images which may + // contain large annotations or be non-images. A proper manifest + // design puts large metadata in subobjects, as is consistent the + // intent of the manifest design. + MaxManifestSize int64 = 4 * 1048 * 1048 ) // Authorizer is used to authorize HTTP requests based on 401 HTTP responses. @@ -72,31 +86,38 @@ type Authorizer interface { // ResolverOptions are used to configured a new Docker register resolver type ResolverOptions struct { + // Hosts returns registry host configurations for a namespace. + Hosts RegistryHosts + + // Headers are the HTTP request header fields sent by the resolver + Headers http.Header + + // Tracker is used to track uploads to the registry. This is used + // since the registry does not have upload tracking and the existing + // mechanism for getting blob upload status is expensive. 
+ Tracker StatusTracker + // Authorizer is used to authorize registry requests + // Deprecated: use Hosts Authorizer Authorizer // Credentials provides username and secret given a host. // If username is empty but a secret is given, that secret // is interpreted as a long lived token. - // Deprecated: use Authorizer + // Deprecated: use Hosts Credentials func(string) (string, string, error) // Host provides the hostname given a namespace. + // Deprecated: use Hosts Host func(string) (string, error) - // Headers are the HTTP request header fields sent by the resolver - Headers http.Header - // PlainHTTP specifies to use plain http and not https + // Deprecated: use Hosts PlainHTTP bool // Client is the http client to used when making registry requests + // Deprecated: use Hosts Client *http.Client - - // Tracker is used to track uploads to the registry. This is used - // since the registry does not have upload tracking and the existing - // mechanism for getting blob upload status is expensive. - Tracker StatusTracker } // DefaultHost is the default host function. @@ -108,12 +129,10 @@ func DefaultHost(ns string) (string, error) { } type dockerResolver struct { - auth Authorizer - host func(string) (string, error) - headers http.Header - plainHTTP bool - client *http.Client - tracker StatusTracker + hosts RegistryHosts + header http.Header + resolveHeader http.Header + tracker StatusTracker } // NewResolver returns a new resolver to a Docker registry @@ -121,33 +140,56 @@ func NewResolver(options ResolverOptions) remotes.Resolver { if options.Tracker == nil { options.Tracker = NewInMemoryTracker() } - if options.Host == nil { - options.Host = DefaultHost - } + if options.Headers == nil { options.Headers = make(http.Header) } + if _, ok := options.Headers["User-Agent"]; !ok { + options.Headers.Set("User-Agent", "containerd/"+version.Version) + } + + resolveHeader := http.Header{} if _, ok := options.Headers["Accept"]; !ok { // set headers for all the types we support for resolution. - options.Headers.Set("Accept", strings.Join([]string{ + resolveHeader.Set("Accept", strings.Join([]string{ images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, - ocispec.MediaTypeImageIndex, "*"}, ", ")) - } - if _, ok := options.Headers["User-Agent"]; !ok { - options.Headers.Set("User-Agent", "containerd/"+version.Version) + ocispec.MediaTypeImageIndex, "*/*"}, ", ")) + } else { + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") } - if options.Authorizer == nil { - options.Authorizer = NewAuthorizer(options.Client, options.Credentials) + + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) 
} return &dockerResolver{ - auth: options.Authorizer, - host: options.Host, - headers: options.Headers, - plainHTTP: options.PlainHTTP, - client: options.Client, - tracker: options.Tracker, + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, } } @@ -194,13 +236,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, err } - fetcher := dockerFetcher{ - dockerBase: base, - } - var ( - urls []string - dgst = refspec.Digest() + lastErr error + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull ) if dgst != "" { @@ -211,100 +251,130 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } // turns out, we have a valid digest, make a url. - urls = append(urls, fetcher.url("manifests", dgst.String())) + paths = append(paths, []string{"manifests", dgst.String()}) // fallback to blobs on not found. - urls = append(urls, fetcher.url("blobs", dgst.String())) + paths = append(paths, []string{"blobs", dgst.String()}) } else { - urls = append(urls, fetcher.url("manifests", refspec.Object)) + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") } ctx, err = contextWithRepositoryScope(ctx, refspec, false) if err != nil { return "", ocispec.Descriptor{}, err } - for _, u := range urls { - req, err := http.NewRequest(http.MethodHead, u, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - req.Header = r.headers + for _, u := range paths { + for _, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) - log.G(ctx).Debug("resolving") - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - if errors.Cause(err) == ErrInvalidAuthorization { - err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") - } - return "", ocispec.Descriptor{}, err - } - resp.Body.Close() // don't care about body contents. - - if resp.StatusCode > 299 { - if resp.StatusCode == http.StatusNotFound { - continue + req := base.request(host, http.MethodHead, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) } - return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) - } - size := resp.ContentLength - - // this is the only point at which we trust the registry. we use the - // content headers to assemble a descriptor for the name. when this becomes - // more robust, we mostly get this information from a secure trust store. 
- dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) - contentType := getManifestMediaType(resp) - if dgstHeader != "" && size != -1 { - if err := dgstHeader.Validate(); err != nil { - return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) - } - dgst = dgstHeader - } else { - log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") - - req, err := http.NewRequest(http.MethodGet, u, nil) + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) if err != nil { + if errors.Cause(err) == ErrInvalidAuthorization { + err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + } return "", ocispec.Descriptor{}, err } - req.Header = r.headers + resp.Body.Close() // don't care about body contents. - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - return "", ocispec.Descriptor{}, err + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + continue + } + return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) } - defer resp.Body.Close() + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") - bodyReader := countingReader{reader: resp.Body} + req = base.request(host, http.MethodGet, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) 
+ } - contentType = getManifestMediaType(resp) - if contentType == images.MediaTypeDockerSchema1Manifest { - b, err := schema1.ReadStripSignature(&bodyReader) + resp, err := req.doWithRetries(ctx, nil) if err != nil { return "", ocispec.Descriptor{}, err } - - dgst = digest.FromBytes(b) - } else { - dgst, err = digest.FromReader(&bodyReader) - if err != nil { + defer resp.Body.Close() + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + if dgst == "" { + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + dgst = digest.FromBytes(b) + } else { + dgst, err = digest.FromReader(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + } + } else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil { return "", ocispec.Descriptor{}, err } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if lastErr == nil { + lastErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref) + } + continue + } + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, } - size = bodyReader.bytesRead - } - desc := ocispec.Descriptor{ - Digest: dgst, - MediaType: contentType, - Size: size, + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil } + } - log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") - return ref, desc, nil + if lastErr == nil { + lastErr = errors.Wrap(errdefs.ErrNotFound, ref) } - return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref) + return "", ocispec.Descriptor{}, lastErr } func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { @@ -329,13 +399,6 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return nil, err } - // Manifests can be pushed by digest like any other object, but the passed in - // reference cannot take a digest without the associated content. A tag is allowed - // and will be used to tag pushed manifests. 
- if refspec.Object != "" && strings.Contains(refspec.Object, "@") { - return nil, errors.New("cannot use digest reference for push locator") - } - base, err := r.base(refspec) if err != nil { return nil, err @@ -343,60 +406,64 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return dockerPusher{ dockerBase: base, - tag: refspec.Object, + object: refspec.Object, tracker: r.tracker, }, nil } type dockerBase struct { - refspec reference.Spec - base url.URL - - client *http.Client - auth Authorizer + refspec reference.Spec + namespace string + hosts []RegistryHost + header http.Header } func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { - var ( - err error - base url.URL - ) - host := refspec.Hostname() - base.Host = host - if r.host != nil { - base.Host, err = r.host(host) - if err != nil { - return nil, err - } - } - - base.Scheme = "https" - if r.plainHTTP || strings.HasPrefix(base.Host, "localhost:") { - base.Scheme = "http" + hosts, err := r.hosts(host) + if err != nil { + return nil, err } - - prefix := strings.TrimPrefix(refspec.Locator, host+"/") - base.Path = path.Join("/v2", prefix) - return &dockerBase{ - refspec: refspec, - base: base, - client: r.client, - auth: r.auth, + refspec: refspec, + namespace: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, }, nil } -func (r *dockerBase) url(ps ...string) string { - url := r.base - url.Path = path.Join(url.Path, path.Join(ps...)) - return url.String() +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return +} + +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := http.Header{} + for key, value := range r.header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.namespace}, ps...) + p := path.Join(parts...) 
+ // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } } -func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { +func (r *request) authorize(ctx context.Context, req *http.Request) error { // Check if has header for host - if r.auth != nil { - if err := r.auth.Authorize(ctx, req); err != nil { + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { return err } } @@ -404,80 +471,137 @@ func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { return nil } -func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) - log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request") +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} + +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequest(r.method, u, nil) + if err != nil { + return nil, err + } + req.Header = r.header + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") if err := r.authorize(ctx, req); err != nil { return nil, errors.Wrap(err, "failed to authorize") } - resp, err := ctxhttp.Do(ctx, r.client, req) + resp, err := ctxhttp.Do(ctx, r.host.Client, req) if err != nil { return nil, errors.Wrap(err, "failed to do request") } - log.G(ctx).WithFields(logrus.Fields{ - "status": resp.Status, - "response.headers": resp.Header, - }).Debug("fetch response received") + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") return resp, nil } -func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) { - resp, err := r.doRequest(ctx, req) +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) if err != nil { return nil, err } responses = append(responses, resp) - req, err = r.retryRequest(ctx, req, responses) + retry, err := r.retryRequest(ctx, responses) if err != nil { resp.Body.Close() return nil, err } - if req != nil { + if retry { resp.Body.Close() - return r.doRequestWithRetries(ctx, req, responses) + return r.doWithRetries(ctx, responses) } return resp, err } -func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) { +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { if len(responses) > 5 { - return nil, nil + return false, nil } last := responses[len(responses)-1] - if last.StatusCode == http.StatusUnauthorized { + switch last.StatusCode { + case http.StatusUnauthorized: log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") - if r.auth != nil { - if err := r.auth.AddResponses(ctx, responses); err == nil { - return copyRequest(req) + if r.host.Authorizer != 
nil { + if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return true, nil } else if !errdefs.IsNotImplemented(err) { - return nil, err + return false, err } } - return nil, nil - } else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead { + return false, nil + case http.StatusMethodNotAllowed: // Support registries which have not properly implemented the HEAD method for // manifests endpoint - if strings.Contains(req.URL.Path, "/manifests/") { - // TODO: copy request? - req.Method = http.MethodGet - return copyRequest(req) + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil } + case http.StatusRequestTimeout, http.StatusTooManyRequests: + return true, nil } // TODO: Handle 50x errors accounting for attempt history - return nil, nil + return false, nil } -func copyRequest(req *http.Request) (*http.Request, error) { - ireq := *req - if ireq.GetBody != nil { - var err error - ireq.Body, err = ireq.GetBody() - if err != nil { - return nil, err +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) logrus.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := "request.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v } } - return &ireq, nil + + return logrus.Fields(fields) +} + +func responseFields(resp *http.Response) logrus.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." 
+ k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index 29b41cc143702..8314c01d5a6fc 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -216,12 +216,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De ref := remotes.MakeRefKey(ctx, desc) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest") } ref = remotes.MakeRefKey(ctx, config) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config") } return desc, nil diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go index 52c2443118846..fa84014337133 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -18,6 +18,7 @@ package docker import ( "context" + "fmt" "net/url" "sort" "strings" @@ -50,27 +51,47 @@ func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, pus if err != nil { return nil, err } - return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil + return WithScope(ctx, s), nil } -// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and params["scope"]. -func getTokenScopes(ctx context.Context, params map[string]string) []string { +// WithScope appends a custom registry auth scope to the context. +func WithScope(ctx context.Context, scope string) context.Context { + var scopes []string + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = v.([]string) + scopes = append(scopes, scope) + } else { + scopes = []string{scope} + } + return context.WithValue(ctx, tokenScopesKey{}, scopes) +} + +// contextWithAppendPullRepositoryScope is used to append repository pull +// scope into existing scopes indexed by the tokenScopesKey{}. +func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { + return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) +} + +// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. +func getTokenScopes(ctx context.Context, common []string) []string { var scopes []string if x := ctx.Value(tokenScopesKey{}); x != nil { scopes = append(scopes, x.([]string)...) } - if scope, ok := params["scope"]; ok { - for _, s := range scopes { - // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) - // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. - if s == scope { - // already appended - goto Sort - } + + scopes = append(scopes, common...) 
+ sort.Strings(scopes) + + l := 0 + for idx := 1; idx < len(scopes); idx++ { + // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. + if scopes[l] == scopes[idx] { + continue } - scopes = append(scopes, scope) + + l++ + scopes[l] = scopes[idx] } -Sort: - sort.Strings(scopes) - return scopes + return scopes[:l+1] } diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 0ee56c887ad6e..671fea106b14c 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -33,27 +33,46 @@ import ( "github.com/sirupsen/logrus" ) +type refKeyPrefix struct{} + +// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing +// data in the content store from the FetchHandler. +// +// Used in `MakeRefKey` to determine what the key prefix should be. +func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { + var values map[string]string + if v := ctx.Value(refKeyPrefix{}); v != nil { + values = v.(map[string]string) + } else { + values = make(map[string]string) + } + + values[mediaType] = prefix + return context.WithValue(ctx, refKeyPrefix{}, values) +} + // MakeRefKey returns a unique reference for the descriptor. This reference can be // used to lookup ongoing processes related to the descriptor. This function // may look to the context to namespace the reference appropriately. func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { - // TODO(stevvooe): Need better remote key selection here. Should be a - // product of the context, which may include information about the ongoing - // fetch process. - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + if v := ctx.Value(refKeyPrefix{}); v != nil { + values := v.(map[string]string) + if prefix := values[desc.MediaType]; prefix != "" { + return prefix + "-" + desc.Digest.String() + } + } + + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: return "manifest-" + desc.Digest.String() - case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: return "index-" + desc.Digest.String() - case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, - images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, - ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, - ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip: + case images.IsLayerType(mt): return "layer-" + desc.Digest.String() - case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + case images.IsKnownConfig(mt): return "config-" + desc.Digest.String() default: - log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType) + log.G(ctx).Warnf("reference for unknown type: %s", mt) return "unknown-" + desc.Digest.String() } } @@ -156,7 +175,7 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc // // Base handlers can be provided which will be called before any push specific // handlers. 
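// Editor's sketch (not part of the vendored patch): using the new
// WithMediaTypeKeyPrefix hook above. It assumes it lives in package remotes;
// the media type and prefix strings are invented for illustration.
func exampleRefKey(ctx context.Context, desc ocispec.Descriptor) string {
	// Content of this media type fetched through FetchHandler is then tracked
	// under "custom-<digest>" instead of falling back to "unknown-<digest>".
	ctx = WithMediaTypeKeyPrefix(ctx, "application/vnd.example.custom.v1+json", "custom")
	return MakeRefKey(ctx, desc)
}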
-func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, provider content.Provider, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { var m sync.Mutex manifestStack := []ocispec.Descriptor{} @@ -173,10 +192,14 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr } }) - pushHandler := PushHandler(pusher, provider) + pushHandler := PushHandler(pusher, store) + + platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) + + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) var handler images.Handler = images.Handlers( - images.FilterPlatforms(images.ChildrenHandler(provider), platform), + annotateHandler, filterHandler, pushHandler, ) @@ -241,3 +264,45 @@ func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) return descs, nil } } + +// annotateDistributionSourceHandler add distribution source label into +// annotation of config or blob descriptor. +func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // only add distribution source for the config or blob data descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + default: + return children, nil + } + + for i := range children { + child := children[i] + + info, err := manager.Info(ctx, child.Digest) + if err != nil { + return nil, err + } + + for k, v := range info.Labels { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + + children[i] = child + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go index 3ea830f6b4236..73d4ccca52e0c 100644 --- a/vendor/github.com/containerd/containerd/rootfs/apply.go +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -48,6 +48,14 @@ type Layer struct { // Layers are applied in order they are given, making the first layer the // bottom-most layer in the layer chain. func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) { + return ApplyLayersWithOpts(ctx, layers, sn, a, nil) +} + +// ApplyLayersWithOpts applies all the layers using the given snapshotter, applier, and apply opts. +// The returned result is a chain id digest representing all the applied layers. +// Layers are applied in order they are given, making the first layer the +// bottom-most layer in the layer chain. 
+func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (digest.Digest, error) { chain := make([]digest.Digest, len(layers)) for i, layer := range layers { chain[i] = layer.Diff.Digest @@ -63,7 +71,7 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, layers, chain, sn, a); err != nil && !errdefs.IsAlreadyExists(err) { + if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) { return "", err } } @@ -75,6 +83,13 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, // using the provided snapshotter and applier. If the layer was unpacked true // is returned, if the layer already exists false is returned. func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) { + return ApplyLayerWithOpts(ctx, layer, chain, sn, a, opts, nil) +} + +// ApplyLayerWithOpts applies a single layer on top of the given provided layer chain, +// using the provided snapshotter, applier, and apply opts. If the layer was unpacked true +// is returned, if the layer already exists false is returned. +func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) { var ( chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String() applied bool @@ -84,7 +99,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts...); err != nil { + if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return false, err } @@ -93,9 +108,10 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap } } return applied, nil + } -func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error { +func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) error { var ( parent = identity.ChainID(chain[:len(chain)-1]) chainID = identity.ChainID(chain) @@ -113,7 +129,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) if err != nil { if errdefs.IsNotFound(err) && len(layers) > 1 { - if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a); err != nil { + if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a, nil, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return err } @@ -144,7 +160,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn } }() - diff, err = a.Apply(ctx, layer.Blob, mounts) + diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...) 
if err != nil { err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest) return err diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go index b3e6ba8a33b61..f396c73ab0948 100644 --- a/vendor/github.com/containerd/containerd/rootfs/diff.go +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -22,6 +22,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/snapshots" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -31,6 +32,13 @@ import ( // the content creation and the provided snapshotter and mount differ are used // for calculating the diff. The descriptor for the layer diff is returned. func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) { + // dctx is used to handle cleanup things just in case the param ctx + // has been canceled, which causes that the defer cleanup fails. + dctx := context.Background() + if ns, ok := namespaces.Namespace(ctx); ok { + dctx = namespaces.WithNamespace(dctx, ns) + } + info, err := sn.Stat(ctx, snapshotID) if err != nil { return ocispec.Descriptor{}, err @@ -41,7 +49,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(ctx, lowerKey) + defer sn.Remove(dctx, lowerKey) var upper []mount.Mount if info.Kind == snapshots.KindActive { @@ -55,7 +63,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(ctx, upperKey) + defer sn.Remove(dctx, upperKey) } return d.Compare(ctx, lower, upper, opts...) 
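// Editor's sketch (not part of the vendored patch): the relationship between
// the existing helpers and the new *WithOpts variants added above. It assumes
// it lives in package rootfs; the layers, snapshotter, applier and applyOpts
// all come from the caller, and the function name is invented.
func unpackWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts ...diff.ApplyOpt) (digest.Digest, error) {
	if len(applyOpts) == 0 {
		// ApplyLayers is now a thin wrapper over ApplyLayersWithOpts with nil opts.
		return ApplyLayers(ctx, layers, sn, a)
	}
	// The apply options are forwarded to the Applier for every layer in the chain.
	return ApplyLayersWithOpts(ctx, layers, sn, a, applyOpts)
}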
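// Another editor's sketch (also not part of the vendored patch): the
// cleanup-context pattern CreateDiff adopts above, pulled out as a helper with
// an invented name. A canceled request context would make the deferred
// snapshot Remove calls fail, so cleanup runs on a fresh background context
// that carries over only the namespace.
func cleanupContext(ctx context.Context) context.Context {
	dctx := context.Background()
	if ns, ok := namespaces.Namespace(ctx); ok {
		dctx = namespaces.WithNamespace(dctx, ns)
	}
	return dctx
}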
diff --git a/vendor/github.com/containerd/containerd/runtime/task.go b/vendor/github.com/containerd/containerd/runtime/task.go index 981e290c68d12..ab9017ba58a7e 100644 --- a/vendor/github.com/containerd/containerd/runtime/task.go +++ b/vendor/github.com/containerd/containerd/runtime/task.go @@ -33,6 +33,7 @@ type TaskInfo struct { // Process is a runtime object for an executing process inside a container type Process interface { + // ID of the process ID() string // State returns the process state State(context.Context) (State, error) @@ -54,6 +55,8 @@ type Process interface { type Task interface { Process + // PID of the process + PID() uint32 // Namespace that the task exists in Namespace() string // Pause pauses the container process diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go index ae0e73f287800..e8b629b79c56c 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go @@ -65,6 +65,10 @@ func newBundle(id, path, workDir string, spec []byte) (b *bundle, err error) { os.RemoveAll(workDir) } }() + rootfs := filepath.Join(path, "rootfs") + if err := os.MkdirAll(rootfs, 0711); err != nil { + return nil, err + } err = ioutil.WriteFile(filepath.Join(path, configFilename), spec, 0666) return &bundle{ id: id, @@ -179,6 +183,9 @@ func atomicDelete(path string) error { // create a hidden dir for an atomic removal atomicPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path))) if err := os.Rename(path, atomicPath); err != nil { + if os.IsNotExist(err) { + return nil + } return err } return os.RemoveAll(atomicPath) diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go deleted file mode 100644 index 53252ec604f5f..0000000000000 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package proc - -import ( - "github.com/pkg/errors" -) - -// RuncRoot is the path to the root runc state directory -const RuncRoot = "/run/containerd/runc" - -func stateName(v interface{}) string { - switch v.(type) { - case *runningState, *execRunningState: - return "running" - case *createdState, *execCreatedState, *createdCheckpointState: - return "created" - case *pausedState: - return "paused" - case *deletedState: - return "deleted" - case *stoppedState: - return "stopped" - } - panic(errors.Errorf("invalid state %v", v)) -} diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go index c408126aee720..fdaff5f9e4dbd 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go @@ -37,12 +37,12 @@ import ( "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/process" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - "github.com/containerd/containerd/runtime/v1" - "github.com/containerd/containerd/runtime/v1/linux/proc" + v1 "github.com/containerd/containerd/runtime/v1" shim "github.com/containerd/containerd/runtime/v1/shim/v1" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" @@ -50,7 +50,6 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" "golang.org/x/sys/unix" ) @@ -112,13 +111,13 @@ func New(ic *plugin.InitContext) (interface{}, error) { } cfg := ic.Config.(*Config) r := &Runtime{ - root: ic.Root, - state: ic.State, - tasks: runtime.NewTaskList(), - db: m.(*metadata.DB), - address: ic.Address, - events: ic.Events, - config: cfg, + root: ic.Root, + state: ic.State, + tasks: runtime.NewTaskList(), + containers: metadata.NewContainerStore(m.(*metadata.DB)), + address: ic.Address, + events: ic.Events, + config: cfg, } tasks, err := r.restoreTasks(ic.Context) if err != nil { @@ -138,9 +137,9 @@ type Runtime struct { state string address string - tasks *runtime.TaskList - db *metadata.DB - events *exchange.Exchange + tasks *runtime.TaskList + containers containers.Store + events *exchange.Exchange config *Config } @@ -191,18 +190,13 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts } exitHandler := func() { log.G(ctx).WithField("id", id).Info("shim reaped") - t, err := r.tasks.Get(ctx, id) - if err != nil { + + if _, err := r.tasks.Get(ctx, id); err != nil { // Task was never started or was already successfully deleted return } - lc := t.(*Task) - log.G(ctx).WithFields(logrus.Fields{ - "id": id, - "namespace": namespace, - }).Warn("cleaning up after killed shim") - if err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id, lc.pid); err != nil { + if err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id); err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ "id": id, "namespace": namespace, @@ -330,20 +324,26 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { continue } id := path.Name() + // skip hidden directories + if len(id) > 0 && id[0] == '.' 
{ + continue + } bundle := loadBundle( id, filepath.Join(r.state, ns, id), filepath.Join(r.root, ns, id), ) ctx = namespaces.WithNamespace(ctx, ns) - pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile)) + shimExit := make(chan struct{}) s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() { - _, err := r.tasks.Get(ctx, id) - if err != nil { + defer close(shimExit) + if _, err := r.tasks.Get(ctx, id); err != nil { // Task was never started or was already successfully deleted return } - if err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid); err != nil { + + if err := r.cleanupAfterDeadShim(ctx, bundle, ns, id); err != nil { log.G(ctx).WithError(err).WithField("bundle", bundle.path). Error("cleaning up after dead shim") } @@ -353,7 +353,7 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { "id": id, "namespace": ns, }).Error("connecting to shim") - err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid) + err := r.cleanupAfterDeadShim(ctx, bundle, ns, id) if err != nil { log.G(ctx).WithError(err).WithField("bundle", bundle.path). Error("cleaning up after dead shim") @@ -363,6 +363,18 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { logDirPath := filepath.Join(r.root, ns, id) + copyAndClose := func(dst io.Writer, src io.ReadWriteCloser) { + copyDone := make(chan struct{}) + go func() { + io.Copy(dst, src) + close(copyDone) + }() + select { + case <-shimExit: + case <-copyDone: + } + src.Close() + } shimStdoutLog, err := v1.OpenShimStdoutLog(ctx, logDirPath) if err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ @@ -372,7 +384,11 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { }).Error("opening shim stdout log pipe") continue } - go io.Copy(os.Stdout, shimStdoutLog) + if r.config.ShimDebug { + go copyAndClose(os.Stdout, shimStdoutLog) + } else { + go copyAndClose(ioutil.Discard, shimStdoutLog) + } shimStderrLog, err := v1.OpenShimStderrLog(ctx, logDirPath) if err != nil { @@ -383,7 +399,11 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { }).Error("opening shim stderr log pipe") continue } - go io.Copy(os.Stderr, shimStderrLog) + if r.config.ShimDebug { + go copyAndClose(os.Stderr, shimStderrLog) + } else { + go copyAndClose(ioutil.Discard, shimStderrLog) + } t, err := newTask(id, ns, pid, s, r.events, r.tasks, bundle) if err != nil { @@ -395,7 +415,13 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { return o, nil } -func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string, pid int) error { +func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string) error { + log.G(ctx).WithFields(logrus.Fields{ + "id": id, + "namespace": ns, + }).Warn("cleaning up after shim dead") + + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile)) ctx = namespaces.WithNamespace(ctx, ns) if err := r.terminate(ctx, bundle, ns, id); err != nil { if r.config.ShimDebug { @@ -418,6 +444,10 @@ func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, if err := bundle.Delete(); err != nil { log.G(ctx).WithError(err).Error("delete bundle") } + // kill shim + if shimPid, err := runc.ReadPidFile(filepath.Join(bundle.path, "shim.pid")); err == nil && shimPid > 0 { + unix.Kill(shimPid, unix.SIGKILL) + } r.events.Publish(ctx, 
runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ ContainerID: id, @@ -456,7 +486,7 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er var ( cmd = r.config.Runtime - root = proc.RuncRoot + root = process.RuncRoot ) if ropts != nil { if ropts.Runtime != "" { @@ -477,14 +507,8 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er } func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runctypes.RuncOptions, error) { - var container containers.Container - - if err := r.db.View(func(tx *bolt.Tx) error { - store := metadata.NewContainerStore(tx) - var err error - container, err = store.Get(ctx, id) - return err - }); err != nil { + container, err := r.containers.Get(ctx, id) + if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go index e13255e955d2f..0970c3ea3b1a0 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go @@ -84,6 +84,11 @@ func (t *Task) Namespace() string { return t.namespace } +// PID of the task +func (t *Task) PID() uint32 { + return uint32(t.pid) +} + // Delete the task and return the exit status func (t *Task) Delete(ctx context.Context) (*runtime.Exit, error) { rsp, err := t.shim.Delete(ctx, empty) @@ -124,11 +129,15 @@ func (t *Task) Start(ctx context.Context) error { t.pid = int(r.Pid) if !hasCgroup { cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(t.pid)) - if err != nil { + if err != nil && err != cgroups.ErrCgroupDeleted { return err } t.mu.Lock() - t.cg = cg + if err == cgroups.ErrCgroupDeleted { + t.cg = nil + } else { + t.cg = cg + } t.mu.Unlock() } t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{ diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go index 6cdd9cfc2cd14..7c68248c52948 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go @@ -26,6 +26,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "sync" "syscall" @@ -98,9 +99,9 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa cmd.Wait() exitHandler() if stdoutLog != nil { - stderrLog.Close() + stdoutLog.Close() } - if stdoutLog != nil { + if stderrLog != nil { stderrLog.Close() } }() @@ -110,7 +111,10 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa "debug": debug, }).Infof("shim %s started", binary) - if err := writeAddress(filepath.Join(config.Path, "address"), address); err != nil { + if err := writeFile(filepath.Join(config.Path, "address"), address); err != nil { + return nil, nil, err + } + if err := writeFile(filepath.Join(config.Path, "shim.pid"), strconv.Itoa(cmd.Process.Pid)); err != nil { return nil, nil, err } // set shim in cgroup if it is provided @@ -123,8 +127,8 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa "address": address, }).Infof("shim placed in cgroup %s", cgroup) } - if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil { - return nil, nil, errors.Wrap(err, "failed to set OOM Score on shim") + if err = setupOOMScore(cmd.Process.Pid); err != nil { + return nil, nil, err } c, clo, 
err := WithConnect(address, func() {})(ctx, config) if err != nil { @@ -134,6 +138,21 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa } } +// setupOOMScore gets containerd's oom score and adds +1 to it +// to ensure a shim has a lower* score than the daemons +func setupOOMScore(shimPid int) error { + pid := os.Getpid() + score, err := sys.GetOOMScoreAdj(pid) + if err != nil { + return errors.Wrap(err, "get daemon OOM score") + } + shimScore := score + 1 + if err := sys.SetOOMScore(shimPid, shimScore); err != nil { + return errors.Wrap(err, "set shim OOM score") + } + return nil +} + func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File, stdout, stderr io.Writer) (*exec.Cmd, error) { selfExe, err := os.Executable() if err != nil { @@ -172,8 +191,8 @@ func newCommand(binary, daemonAddress string, debug bool, config shim.Config, so return cmd, nil } -// writeAddress writes a address file atomically -func writeAddress(path, address string) error { +// writeFile writes a address file atomically +func writeFile(path, address string) error { path, err := filepath.Abs(path) if err != nil { return err @@ -279,7 +298,7 @@ func (c *Client) KillShim(ctx context.Context) error { return c.signalShim(ctx, unix.SIGKILL) } -// Close the cient connection +// Close the client connection func (c *Client) Close() error { if c.c == nil { return nil diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go index 4d2578ad085b9..a722ea1c23045 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go @@ -35,11 +35,12 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/process" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - rproc "github.com/containerd/containerd/runtime/proc" - "github.com/containerd/containerd/runtime/v1/linux/proc" shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" + "github.com/containerd/containerd/sys/reaper" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" @@ -84,9 +85,9 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { s := &Service{ config: config, context: ctx, - processes: make(map[string]rproc.Process), + processes: make(map[string]process.Process), events: make(chan interface{}, 128), - ec: Default.Subscribe(), + ec: reaper.Default.Subscribe(), } go s.processExits() if err := s.initPlatform(); err != nil { @@ -102,9 +103,9 @@ type Service struct { config Config context context.Context - processes map[string]rproc.Process + processes map[string]process.Process events chan interface{} - platform rproc.Platform + platform stdio.Platform ec chan runc.Exit // Filled by Create() @@ -114,9 +115,9 @@ type Service struct { // Create a new initial process and container with the underlying OCI runtime func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { - var mounts []proc.Mount + var mounts []process.Mount for _, m := range r.Rootfs { - mounts = append(mounts, proc.Mount{ + mounts = 
append(mounts, process.Mount{ Type: m.Type, Source: m.Source, Target: m.Target, @@ -127,12 +128,12 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * rootfs := "" if len(mounts) > 0 { rootfs = filepath.Join(r.Bundle, "rootfs") - if err := os.Mkdir(rootfs, 0711); err != nil { + if err := os.Mkdir(rootfs, 0711); err != nil && !os.IsExist(err) { return nil, err } } - config := &proc.CreateConfig{ + config := &process.CreateConfig{ ID: r.ID, Bundle: r.Bundle, Runtime: r.Runtime, @@ -266,7 +267,7 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*pty return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ + process, err := p.(*process.Init).Exec(ctx, s.config.Path, &process.ExecConfig{ ID: r.ID, Terminal: r.Terminal, Stdin: r.Stdin, @@ -348,7 +349,7 @@ func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, er if err != nil { return nil, err } - if err := p.(*proc.Init).Pause(ctx); err != nil { + if err := p.(*process.Init).Pause(ctx); err != nil { return nil, err } return empty, nil @@ -360,7 +361,7 @@ func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, e if err != nil { return nil, err } - if err := p.(*proc.Init).Resume(ctx); err != nil { + if err := p.(*process.Init).Resume(ctx); err != nil { return nil, err } return empty, nil @@ -448,7 +449,7 @@ func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskReque } options = *v.(*runctypes.CheckpointOptions) } - if err := p.(*proc.Init).Checkpoint(ctx, &proc.CheckpointConfig{ + if err := p.(*process.Init).Checkpoint(ctx, &process.CheckpointConfig{ Path: r.Path, Exit: options.Exit, AllowOpenTCP: options.OpenTcp, @@ -476,7 +477,7 @@ func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*pt if err != nil { return nil, err } - if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil { + if err := p.(*process.Init).Update(ctx, r.Resources); err != nil { return nil, errdefs.ToGRPC(err) } return empty, nil @@ -502,11 +503,11 @@ func (s *Service) processExits() { } } -func (s *Service) allProcesses() []rproc.Process { +func (s *Service) allProcesses() []process.Process { s.mu.Lock() defer s.mu.Unlock() - res := make([]rproc.Process, 0, len(s.processes)) + res := make([]process.Process, 0, len(s.processes)) for _, p := range s.processes { res = append(res, p) } @@ -514,33 +515,35 @@ func (s *Service) allProcesses() []rproc.Process { } func (s *Service) checkProcesses(e runc.Exit) { - shouldKillAll, err := shouldKillAllOnExit(s.bundle) - if err != nil { - log.G(s.context).WithError(err).Error("failed to check shouldKillAll") - } - for _, p := range s.allProcesses() { - if p.Pid() == e.Pid { + if p.Pid() != e.Pid { + continue + } + if ip, ok := p.(*process.Init); ok { + shouldKillAll, err := shouldKillAllOnExit(s.bundle) + if err != nil { + log.G(s.context).WithError(err).Error("failed to check shouldKillAll") + } + + // Ensure all children are killed if shouldKillAll { - if ip, ok := p.(*proc.Init); ok { - // Ensure all children are killed - if err := ip.KillAll(s.context); err != nil { - log.G(s.context).WithError(err).WithField("id", ip.ID()). - Error("failed to kill init's children") - } + if err := ip.KillAll(s.context); err != nil { + log.G(s.context).WithError(err).WithField("id", ip.ID()). 
+ Error("failed to kill init's children") } } - p.SetExited(e.Status) - s.events <- &eventstypes.TaskExit{ - ContainerID: s.id, - ID: p.ID(), - Pid: uint32(e.Pid), - ExitStatus: uint32(e.Status), - ExitedAt: p.ExitedAt(), - } - return } + + p.SetExited(e.Status) + s.events <- &eventstypes.TaskExit{ + ContainerID: s.id, + ID: p.ID(), + Pid: uint32(e.Pid), + ExitStatus: uint32(e.Status), + ExitedAt: p.ExitedAt(), + } + return } } @@ -554,7 +557,7 @@ func shouldKillAllOnExit(bundlePath string) (bool, error) { if bundleSpec.Linux != nil { for _, ns := range bundleSpec.Linux.Namespaces { - if ns.Type == specs.PIDNamespace { + if ns.Type == specs.PIDNamespace && ns.Path == "" { return false, nil } } @@ -569,7 +572,7 @@ func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, er return nil, err } - ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) + ps, err := p.(*process.Init).Runtime().Ps(ctx, id) if err != nil { return nil, err } @@ -589,7 +592,7 @@ func (s *Service) forward(publisher events.Publisher) { } // getInitProcess returns initial process -func (s *Service) getInitProcess() (rproc.Process, error) { +func (s *Service) getInitProcess() (process.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -601,7 +604,7 @@ func (s *Service) getInitProcess() (rproc.Process, error) { } // getExecProcess returns exec process -func (s *Service) getExecProcess(id string) (rproc.Process, error) { +func (s *Service) getExecProcess(id string) (process.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -640,7 +643,7 @@ func getTopic(ctx context.Context, e interface{}) string { return runtime.TaskUnknownTopic } -func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform rproc.Platform, r *proc.CreateConfig, rootfs string) (*proc.Init, error) { +func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform stdio.Platform, r *process.CreateConfig, rootfs string) (*process.Init, error) { var options runctypes.CreateOptions if r.Options != nil { v, err := typeurl.UnmarshalAny(r.Options) @@ -650,8 +653,8 @@ func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu st options = *v.(*runctypes.CreateOptions) } - runtime := proc.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup) - p := proc.New(r.ID, runtime, rproc.Stdio{ + runtime := process.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup) + p := process.New(r.ID, runtime, stdio.Stdio{ Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, diff --git a/vendor/github.com/containerd/containerd/runtime/v2/README.md b/vendor/github.com/containerd/containerd/runtime/v2/README.md index 51dcafafa4078..76d30373fdfc6 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/README.md +++ b/vendor/github.com/containerd/containerd/runtime/v2/README.md @@ -183,7 +183,7 @@ Current supported schemes for logging are: * file - Linux & Windows * npipe - Windows -Binary logging has the abilty to forward a container's STDIO to an external binary for consumption. +Binary logging has the ability to forward a container's STDIO to an external binary for consumption. 
A sample logging driver that forwards the container's STDOUT and STDERR to `journald` is: ```go diff --git a/vendor/github.com/containerd/containerd/services/server/config/config.go b/vendor/github.com/containerd/containerd/services/server/config/config.go index 26fb92599307d..ff37716085c61 100644 --- a/vendor/github.com/containerd/containerd/services/server/config/config.go +++ b/vendor/github.com/containerd/containerd/services/server/config/config.go @@ -17,13 +17,21 @@ package config import ( + "path/filepath" + "strings" + "github.com/BurntSushi/toml" - "github.com/containerd/containerd/errdefs" + "github.com/imdario/mergo" "github.com/pkg/errors" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/plugin" ) // Config provides containerd configuration data for the server type Config struct { + // Version of the config file + Version int `toml:"version"` // Root is the path to a directory where containerd will store persistent data Root string `toml:"root"` // State is the path to a directory where containerd will store transient data @@ -32,6 +40,8 @@ type Config struct { PluginDir string `toml:"plugin_dir"` // GRPC configuration settings GRPC GRPCConfig `toml:"grpc"` + // TTRPC configuration settings + TTRPC TTRPCConfig `toml:"ttrpc"` // Debug and profiling settings Debug Debug `toml:"debug"` // Metrics and monitoring settings @@ -50,8 +60,55 @@ type Config struct { Cgroup CgroupConfig `toml:"cgroup"` // ProxyPlugins configures plugins which are communicated to over GRPC ProxyPlugins map[string]ProxyPlugin `toml:"proxy_plugins"` + // Timeouts specified as a duration + Timeouts map[string]string `toml:"timeouts"` + // Imports are additional file path list to config files that can overwrite main config file fields + Imports []string `toml:"imports"` + + StreamProcessors map[string]StreamProcessor `toml:"stream_processors"` +} + +// StreamProcessor provides configuration for diff content processors +type StreamProcessor struct { + // Accepts specific media-types + Accepts []string `toml:"accepts"` + // Returns the media-type + Returns string `toml:"returns"` + // Path or name of the binary + Path string `toml:"path"` + // Args to the binary + Args []string `toml:"args"` +} + +// GetVersion returns the config file's version +func (c *Config) GetVersion() int { + if c.Version == 0 { + return 1 + } + return c.Version +} - md toml.MetaData +// ValidateV2 validates the config for a v2 file +func (c *Config) ValidateV2() error { + if c.GetVersion() != 2 { + return nil + } + for _, p := range c.DisabledPlugins { + if len(strings.Split(p, ".")) < 4 { + return errors.Errorf("invalid disabled plugin URI %q expect io.containerd.x.vx", p) + } + } + for _, p := range c.RequiredPlugins { + if len(strings.Split(p, ".")) < 4 { + return errors.Errorf("invalid required plugin URI %q expect io.containerd.x.vx", p) + } + } + for p := range c.Plugins { + if len(strings.Split(p, ".")) < 4 { + return errors.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p) + } + } + return nil } // GRPCConfig provides GRPC configuration for the socket @@ -66,6 +123,13 @@ type GRPCConfig struct { MaxSendMsgSize int `toml:"max_send_message_size"` } +// TTRPCConfig provides TTRPC configuration for the socket +type TTRPCConfig struct { + Address string `toml:"address"` + UID int `toml:"uid"` + GID int `toml:"gid"` +} + // Debug provides debug configuration type Debug struct { Address string `toml:"address"` @@ -130,26 +194,156 @@ func (bc *BoltConfig) 
Validate() error { } // Decode unmarshals a plugin specific configuration by plugin id -func (c *Config) Decode(id string, v interface{}) (interface{}, error) { +func (c *Config) Decode(p *plugin.Registration) (interface{}, error) { + id := p.URI() + if c.GetVersion() == 1 { + id = p.ID + } data, ok := c.Plugins[id] if !ok { - return v, nil + return p.Config, nil } - if err := c.md.PrimitiveDecode(data, v); err != nil { + if err := toml.PrimitiveDecode(data, p.Config); err != nil { return nil, err } - return v, nil + return p.Config, nil } // LoadConfig loads the containerd server config from the provided path -func LoadConfig(path string, v *Config) error { - if v == nil { - return errors.Wrapf(errdefs.ErrInvalidArgument, "argument v must not be nil") +func LoadConfig(path string, out *Config) error { + if out == nil { + return errors.Wrapf(errdefs.ErrInvalidArgument, "argument out must not be nil") + } + + var ( + loaded = map[string]bool{} + pending = []string{path} + ) + + for len(pending) > 0 { + path, pending = pending[0], pending[1:] + + // Check if a file at the given path already loaded to prevent circular imports + if _, ok := loaded[path]; ok { + continue + } + + config, err := loadConfigFile(path) + if err != nil { + return err + } + + if err := mergeConfig(out, config); err != nil { + return err + } + + imports, err := resolveImports(path, config.Imports) + if err != nil { + return err + } + + loaded[path] = true + pending = append(pending, imports...) } - md, err := toml.DecodeFile(path, v) + + // Fix up the list of config files loaded + out.Imports = []string{} + for path := range loaded { + out.Imports = append(out.Imports, path) + } + + return out.ValidateV2() +} + +// loadConfigFile decodes a TOML file at the given path +func loadConfigFile(path string) (*Config, error) { + config := &Config{} + _, err := toml.DecodeFile(path, &config) + if err != nil { + return nil, err + } + return config, nil +} + +// resolveImports resolves import strings list to absolute paths list: +// - If path contains *, glob pattern matching applied +// - Non abs path is relative to parent config file directory +// - Abs paths returned as is +func resolveImports(parent string, imports []string) ([]string, error) { + var out []string + + for _, path := range imports { + if strings.Contains(path, "*") { + matches, err := filepath.Glob(path) + if err != nil { + return nil, err + } + + out = append(out, matches...) + } else { + path = filepath.Clean(path) + if !filepath.IsAbs(path) { + path = filepath.Join(filepath.Dir(parent), path) + } + + out = append(out, path) + } + } + + return out, nil +} + +// mergeConfig merges Config structs with the following rules: +// 'to' 'from' 'result' +// "" "value" "value" +// "value" "" "value" +// 1 0 1 +// 0 1 1 +// []{"1"} []{"2"} []{"1","2"} +// []{"1"} []{} []{"1"} +// Maps merged by keys, but values are replaced entirely. +func mergeConfig(to, from *Config) error { + err := mergo.Merge(to, from, mergo.WithOverride, mergo.WithAppendSlice) if err != nil { return err } - v.md = md + + // Replace entire sections instead of merging map's values. 
+ for k, v := range from.Plugins { + to.Plugins[k] = v + } + + for k, v := range from.StreamProcessors { + to.StreamProcessors[k] = v + } + + for k, v := range from.ProxyPlugins { + to.ProxyPlugins[k] = v + } + return nil } + +// V1DisabledFilter matches based on ID +func V1DisabledFilter(list []string) plugin.DisableFilter { + set := make(map[string]struct{}, len(list)) + for _, l := range list { + set[l] = struct{}{} + } + return func(r *plugin.Registration) bool { + _, ok := set[r.ID] + return ok + } +} + +// V2DisabledFilter matches based on URI +func V2DisabledFilter(list []string) plugin.DisableFilter { + set := make(map[string]struct{}, len(list)) + for _, l := range list { + set[l] = struct{}{} + } + return func(r *plugin.Registration) bool { + _, ok := set[r.URI()] + return ok + } +} diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index b4af6a30863d1..514538f7ecc06 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -86,10 +86,15 @@ func (k *Kind) UnmarshalJSON(b []byte) error { // Info provides information about a particular snapshot. // JSON marshallability is supported for interactive with tools like ctr, type Info struct { - Kind Kind // active or committed snapshot - Name string // name or key of snapshot - Parent string `json:",omitempty"` // name of parent snapshot - Labels map[string]string `json:",omitempty"` // Labels for snapshot + Kind Kind // active or committed snapshot + Name string // name or key of snapshot + Parent string `json:",omitempty"` // name of parent snapshot + + // Labels for a snapshot. + // + // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited by the + // snapshotter's `Prepare`, `View`, or `Commit` calls. + Labels map[string]string `json:",omitempty"` Created time.Time `json:",omitempty"` // Created time Updated time.Time `json:",omitempty"` // Last update time } diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/reaper.go b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go similarity index 56% rename from vendor/github.com/containerd/containerd/runtime/v1/shim/reaper.go rename to vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go index 45a88db12bab7..baab9740b2557 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/shim/reaper.go +++ b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go @@ -16,7 +16,7 @@ limitations under the License. */ -package shim +package reaper import ( "os/exec" @@ -31,37 +31,61 @@ import ( // ErrNoSuchProcess is returned when the process no longer exists var ErrNoSuchProcess = errors.New("no such process") -const bufferSize = 2048 +const bufferSize = 32 + +type subscriber struct { + sync.Mutex + c chan runc.Exit + closed bool +} + +func (s *subscriber) close() { + s.Lock() + if s.closed { + s.Unlock() + return + } + close(s.c) + s.closed = true + s.Unlock() +} + +func (s *subscriber) do(fn func()) { + s.Lock() + fn() + s.Unlock() +} // Reap should be called when the process receives an SIGCHLD. 
Reap will reap // all exited processes and close their wait channels func Reap() error { now := time.Now() exits, err := sys.Reap(false) - Default.Lock() - for c := range Default.subscribers { - for _, e := range exits { - c <- runc.Exit{ - Timestamp: now, - Pid: e.Pid, - Status: e.Status, - } + for _, e := range exits { + done := Default.notify(runc.Exit{ + Timestamp: now, + Pid: e.Pid, + Status: e.Status, + }) + + select { + case <-done: + case <-time.After(1 * time.Second): } } - Default.Unlock() return err } // Default is the default monitor initialized for the package var Default = &Monitor{ - subscribers: make(map[chan runc.Exit]struct{}), + subscribers: make(map[chan runc.Exit]*subscriber), } // Monitor monitors the underlying system for process status changes type Monitor struct { sync.Mutex - subscribers map[chan runc.Exit]struct{} + subscribers map[chan runc.Exit]*subscriber } // Start starts the command a registers the process with the reaper @@ -95,7 +119,9 @@ func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) { func (m *Monitor) Subscribe() chan runc.Exit { c := make(chan runc.Exit, bufferSize) m.Lock() - m.subscribers[c] = struct{}{} + m.subscribers[c] = &subscriber{ + c: c, + } m.Unlock() return c } @@ -103,7 +129,74 @@ func (m *Monitor) Subscribe() chan runc.Exit { // Unsubscribe to process exit changes func (m *Monitor) Unsubscribe(c chan runc.Exit) { m.Lock() + s, ok := m.subscribers[c] + if !ok { + m.Unlock() + return + } + s.close() delete(m.subscribers, c) - close(c) m.Unlock() } + +func (m *Monitor) getSubscribers() map[chan runc.Exit]*subscriber { + out := make(map[chan runc.Exit]*subscriber) + m.Lock() + for k, v := range m.subscribers { + out[k] = v + } + m.Unlock() + return out +} + +func (m *Monitor) notify(e runc.Exit) chan struct{} { + const timeout = 1 * time.Millisecond + var ( + done = make(chan struct{}, 1) + timer = time.NewTimer(timeout) + success = make(map[chan runc.Exit]struct{}) + ) + stop(timer, true) + + go func() { + defer close(done) + + for { + var ( + failed int + subscribers = m.getSubscribers() + ) + for _, s := range subscribers { + s.do(func() { + if s.closed { + return + } + if _, ok := success[s.c]; ok { + return + } + timer.Reset(timeout) + recv := true + select { + case s.c <- e: + success[s.c] = struct{}{} + case <-timer.C: + recv = false + failed++ + } + stop(timer, recv) + }) + } + // all subscribers received the message + if failed == 0 { + return + } + } + }() + return done +} + +func stop(timer *time.Timer, recv bool) { + if !timer.Stop() && recv { + <-timer.C + } +} diff --git a/vendor/github.com/containerd/containerd/task_opts_unix.go b/vendor/github.com/containerd/containerd/task_opts_unix.go index d3b51a76d1de7..8b498d47efc7d 100644 --- a/vendor/github.com/containerd/containerd/task_opts_unix.go +++ b/vendor/github.com/containerd/containerd/task_opts_unix.go @@ -77,3 +77,29 @@ func WithNoPivotRoot(_ context.Context, _ *Client, ti *TaskInfo) error { } return nil } + +// WithShimCgroup sets the existing cgroup for the shim +func WithShimCgroup(path string) NewTaskOpts { + return func(ctx context.Context, c *Client, ti *TaskInfo) error { + if CheckRuntime(ti.Runtime(), "io.containerd.runc") { + if ti.Options == nil { + ti.Options = &options.Options{} + } + opts, ok := ti.Options.(*options.Options) + if !ok { + return errors.New("invalid v2 shim create options format") + } + opts.ShimCgroup = path + } else { + if ti.Options == nil { + ti.Options = &runctypes.CreateOptions{} + } + opts, ok := 
ti.Options.(*runctypes.CreateOptions) + if !ok { + return errors.New("could not cast TaskInfo Options to CreateOptions") + } + opts.ShimCgroup = path + } + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/unpacker.go b/vendor/github.com/containerd/containerd/unpacker.go new file mode 100644 index 0000000000000..f7580ce855beb --- /dev/null +++ b/vendor/github.com/containerd/containerd/unpacker.go @@ -0,0 +1,243 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "sync/atomic" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/rootfs" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +type layerState struct { + layer rootfs.Layer + downloaded bool + unpacked bool +} + +type unpacker struct { + updateCh chan ocispec.Descriptor + snapshotter string + config UnpackConfig + c *Client +} + +func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacker, error) { + snapshotter, err := c.resolveSnapshotterName(ctx, rCtx.Snapshotter) + if err != nil { + return nil, err + } + var config UnpackConfig + for _, o := range rCtx.UnpackOpts { + if err := o(ctx, &config); err != nil { + return nil, err + } + } + return &unpacker{ + updateCh: make(chan ocispec.Descriptor, 128), + snapshotter: snapshotter, + config: config, + c: c, + }, nil +} + +func (u *unpacker) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor) error { + p, err := content.ReadBlob(ctx, u.c.ContentStore(), config) + if err != nil { + return err + } + + var i ocispec.Image + if err := json.Unmarshal(p, &i); err != nil { + return errors.Wrap(err, "unmarshal image config") + } + diffIDs := i.RootFS.DiffIDs + if len(layers) != len(diffIDs) { + return errors.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) + } + + var ( + sn = u.c.SnapshotService(u.snapshotter) + a = u.c.DiffService() + cs = u.c.ContentStore() + + states []layerState + chain []digest.Digest + ) + for i, desc := range layers { + states = append(states, layerState{ + layer: rootfs.Layer{ + Blob: desc, + Diff: ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Digest: diffIDs[i], + }, + }, + }) + } + for { + var layer ocispec.Descriptor + select { + case layer = <-u.updateCh: + case <-ctx.Done(): + return ctx.Err() + } + log.G(ctx).WithField("desc", layer).Debug("layer downloaded") + for i := range states { + if states[i].layer.Blob.Digest != layer.Digest { + continue + } + // Different layers may have the same digest. When that + // happens, we should continue marking the next layer + // as downloaded. 
+ if states[i].downloaded { + continue + } + states[i].downloaded = true + break + } + for i := range states { + if !states[i].downloaded { + break + } + if states[i].unpacked { + continue + } + + log.G(ctx).WithFields(logrus.Fields{ + "desc": states[i].layer.Blob, + "diff": states[i].layer.Diff, + }).Debug("unpack layer") + + unpacked, err := rootfs.ApplyLayerWithOpts(ctx, states[i].layer, chain, sn, a, + u.config.SnapshotOpts, u.config.ApplyOpts) + if err != nil { + return err + } + + if unpacked { + // Set the uncompressed label after the uncompressed + // digest has been verified through apply. + cinfo := content.Info{ + Digest: states[i].layer.Blob.Digest, + Labels: map[string]string{ + "containerd.io/uncompressed": states[i].layer.Diff.Digest.String(), + }, + } + if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { + return err + } + } + + chain = append(chain, states[i].layer.Diff.Digest) + states[i].unpacked = true + log.G(ctx).WithFields(logrus.Fields{ + "desc": states[i].layer.Blob, + "diff": states[i].layer.Diff, + }).Debug("layer unpacked") + } + // Check whether all layers are unpacked. + if states[len(states)-1].unpacked { + break + } + } + + chainID := identity.ChainID(chain).String() + cinfo := content.Info{ + Digest: config.Digest, + Labels: map[string]string{ + fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", u.snapshotter): chainID, + }, + } + _, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", u.snapshotter)) + if err != nil { + return err + } + log.G(ctx).WithFields(logrus.Fields{ + "config": config.Digest, + "chainID": chainID, + }).Debug("image unpacked") + return nil +} + +func (u *unpacker) handlerWrapper(uctx context.Context, unpacks *int32) (func(images.Handler) images.Handler, *errgroup.Group) { + eg, uctx := errgroup.WithContext(uctx) + return func(f images.Handler) images.Handler { + var ( + lock sync.Mutex + layers []ocispec.Descriptor + schema1 bool + ) + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f.Handle(ctx, desc) + if err != nil { + return children, err + } + + // `Pull` only supports one platform, so there is only + // one manifest to handle, and manifest list can be + // safely skipped. + // TODO: support multi-platform unpack. + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema1Manifest: + lock.Lock() + schema1 = true + lock.Unlock() + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: + lock.Lock() + for _, child := range children { + if child.MediaType == images.MediaTypeDockerSchema2Config || + child.MediaType == ocispec.MediaTypeImageConfig { + continue + } + layers = append(layers, child) + } + lock.Unlock() + case mt == images.MediaTypeDockerSchema2Config || mt == ocispec.MediaTypeImageConfig: + lock.Lock() + l := append([]ocispec.Descriptor{}, layers...) 
+ lock.Unlock() + if len(l) > 0 { + atomic.AddInt32(unpacks, 1) + eg.Go(func() error { + return u.unpack(uctx, desc, l) + }) + } + case images.IsLayerType(mt): + lock.Lock() + update := !schema1 + lock.Unlock() + if update { + u.updateCh <- desc + } + } + return children, nil + }) + }, eg +} diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index 46df00001cbdf..24724fffe6eb5 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -1,10 +1,10 @@ -github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 +github.com/containerd/go-runc e029b79d8cda8374981c64eba71f28ec38e5526f github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f -github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 +github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 -github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c +github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13 github.com/containerd/btrfs af5082808c833de0e79c1e72eea9fea239364877 -github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4 +github.com/containerd/continuity f2a389ac0a02ce21c09edd7344677a601970f41c github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6 github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 @@ -20,64 +20,70 @@ github.com/gogo/protobuf v1.2.1 github.com/gogo/googleapis v1.2.0 github.com/golang/protobuf v1.2.0 github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db -github.com/opencontainers/runc v1.0.0-rc8 +github.com/opencontainers/runc 3e425f80a8c931f88e6d94a8c831b9d5aa481657 # v1.0.0-rc8+ CVE-2019-16884 github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/sirupsen/logrus v1.4.1 -github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c -golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac -google.golang.org/grpc v1.12.0 +github.com/urfave/cli v1.22.0 +golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 +google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 github.com/pkg/errors v0.8.1 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 -golang.org/x/sys d455e41777fca6e8a5a79e34a14b8368bc11d9ba https://github.com/golang/sys +golang.org/x/sys 9eafafc0a87e0fd0aeeba439a4573537970c44c7 https://github.com/golang/sys github.com/opencontainers/image-spec v1.0.1 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e github.com/BurntSushi/toml v0.3.1 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 -github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac -github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517 +github.com/Microsoft/go-winio v0.4.14 +github.com/Microsoft/hcsshim 9e921883ac929bbe515b39793ece99ce3a9d7706 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/containerd/ttrpc 699c4e40d1e7416e08bf7019c7ce2e9beced4636 +github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f github.com/syndtr/gocapability 
d98352740cb2c55f81556b63d4a1ec64c5a319c2 gotest.tools v2.3.0 github.com/google/go-cmp v0.2.0 -go.etcd.io/bbolt v1.3.2 +go.etcd.io/bbolt v1.3.3 +github.com/hashicorp/errwrap v1.0.0 +github.com/hashicorp/go-multierror v1.0.0 +github.com/hashicorp/golang-lru v0.5.3 +go.opencensus.io v0.22.0 +github.com/imdario/mergo v0.3.7 +github.com/cpuguy83/go-md2man v1.0.10 +github.com/russross/blackfriday v1.5.2 # cri dependencies -github.com/containerd/cri 2fc62db8146ce66f27b37306ad5fda34207835f3 # master -github.com/containerd/go-cni 891c2a41e18144b2d7921f971d6c9789a68046b2 -github.com/containernetworking/cni v0.6.0 -github.com/containernetworking/plugins v0.7.0 -github.com/davecgh/go-spew v1.1.0 +github.com/containerd/cri 5d49e7e51b43e36a6b9c4386257c7d08c602237f # release/1.3 +github.com/containerd/go-cni 49fbd9b210f3c8ee3b7fd3cd797aabaf364627c1 +github.com/containernetworking/cni v0.7.1 +github.com/containernetworking/plugins v0.7.6 +github.com/davecgh/go-spew v1.1.1 github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 -github.com/emicklei/go-restful v2.2.1 -github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c -github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 -github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f -github.com/json-iterator/go 1.1.5 +github.com/emicklei/go-restful v2.9.5 +github.com/google/gofuzz v1.0.0 +github.com/json-iterator/go v1.1.7 github.com/modern-go/reflect2 1.0.1 github.com/modern-go/concurrent 1.0.3 github.com/opencontainers/selinux v1.2.2 -github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +github.com/seccomp/libseccomp-golang v0.9.1 github.com/tchap/go-patricia v2.2.6 -golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3 -golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4 -golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 -gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 -gopkg.in/yaml.v2 v2.2.1 -k8s.io/api kubernetes-1.15.0-alpha.0 -k8s.io/apimachinery kubernetes-1.15.0-alpha.0 -k8s.io/apiserver kubernetes-1.15.0-alpha.0 -k8s.io/client-go kubernetes-1.15.0-alpha.0 -k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f -k8s.io/kubernetes v1.15.0-alpha.0 +golang.org/x/crypto 5c40567a22f818bd14a1ea7245dad9f8ef0691aa +golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33 +golang.org/x/time 85acf8d2951cb2a3bde7632f9ff273ef0379bcbd +gopkg.in/inf.v0 v0.9.0 +gopkg.in/yaml.v2 v2.2.2 +k8s.io/api kubernetes-1.16.0-rc.2 +k8s.io/apimachinery kubernetes-1.16.0-rc.2 +k8s.io/apiserver kubernetes-1.16.0-rc.2 +k8s.io/cri-api kubernetes-1.16.0-rc.2 +k8s.io/client-go kubernetes-1.16.0-rc.2 +k8s.io/klog v0.4.0 +k8s.io/kubernetes v1.16.0-rc.2 k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c sigs.k8s.io/yaml v1.1.0 # zfs dependencies -github.com/containerd/zfs 31af176f2ae84fe142ef2655bf7bb2aa618b3b1f +github.com/containerd/zfs 2ceb2dbb8154202ed1b8fd32e4ea25b491d7b251 github.com/mistifyio/go-zfs f784269be439d704d3dfa1906f45dd848fed2beb github.com/google/uuid v1.1.1 diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index b2874bf62c75d..04cf59c87e741 100644 --- 
a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -21,7 +21,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.2.0+unknown" + Version = "1.3.0+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/continuity/context.go b/vendor/github.com/containerd/continuity/context.go deleted file mode 100644 index 75c98594ac68f..0000000000000 --- a/vendor/github.com/containerd/continuity/context.go +++ /dev/null @@ -1,673 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/containerd/continuity/devices" - driverpkg "github.com/containerd/continuity/driver" - "github.com/containerd/continuity/pathdriver" - - "github.com/opencontainers/go-digest" -) - -var ( - // ErrNotFound represents the resource not found - ErrNotFound = fmt.Errorf("not found") - // ErrNotSupported represents the resource not supported - ErrNotSupported = fmt.Errorf("not supported") -) - -// Context represents a file system context for accessing resources. The -// responsibility of the context is to convert system specific resources to -// generic Resource objects. Most of this is safe path manipulation, as well -// as extraction of resource details. -type Context interface { - Apply(Resource) error - Verify(Resource) error - Resource(string, os.FileInfo) (Resource, error) - Walk(filepath.WalkFunc) error -} - -// SymlinkPath is intended to give the symlink target value -// in a root context. Target and linkname are absolute paths -// not under the given root. -type SymlinkPath func(root, linkname, target string) (string, error) - -// ContextOptions represents options to create a new context. -type ContextOptions struct { - Digester Digester - Driver driverpkg.Driver - PathDriver pathdriver.PathDriver - Provider ContentProvider -} - -// context represents a file system context for accessing resources. -// Generally, all path qualified access and system considerations should land -// here. -type context struct { - driver driverpkg.Driver - pathDriver pathdriver.PathDriver - root string - digester Digester - provider ContentProvider -} - -// NewContext returns a Context associated with root. The default driver will -// be used, as returned by NewDriver. -func NewContext(root string) (Context, error) { - return NewContextWithOptions(root, ContextOptions{}) -} - -// NewContextWithOptions returns a Context associate with the root. 
-func NewContextWithOptions(root string, options ContextOptions) (Context, error) { - // normalize to absolute path - pathDriver := options.PathDriver - if pathDriver == nil { - pathDriver = pathdriver.LocalPathDriver - } - - root = pathDriver.FromSlash(root) - root, err := pathDriver.Abs(pathDriver.Clean(root)) - if err != nil { - return nil, err - } - - driver := options.Driver - if driver == nil { - driver, err = driverpkg.NewSystemDriver() - if err != nil { - return nil, err - } - } - - digester := options.Digester - if digester == nil { - digester = simpleDigester{digest.Canonical} - } - - // Check the root directory. Need to be a little careful here. We are - // allowing a link for now, but this may have odd behavior when - // canonicalizing paths. As long as all files are opened through the link - // path, this should be okay. - fi, err := driver.Stat(root) - if err != nil { - return nil, err - } - - if !fi.IsDir() { - return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid} - } - - return &context{ - root: root, - driver: driver, - pathDriver: pathDriver, - digester: digester, - provider: options.Provider, - }, nil -} - -// Resource returns the resource as path p, populating the entry with info -// from fi. The path p should be the path of the resource in the context, -// typically obtained through Walk or from the value of Resource.Path(). If fi -// is nil, it will be resolved. -func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) { - fp, err := c.fullpath(p) - if err != nil { - return nil, err - } - - if fi == nil { - fi, err = c.driver.Lstat(fp) - if err != nil { - return nil, err - } - } - - base, err := newBaseResource(p, fi) - if err != nil { - return nil, err - } - - base.xattrs, err = c.resolveXAttrs(fp, fi, base) - if err == ErrNotSupported { - log.Printf("resolving xattrs on %s not supported", fp) - } else if err != nil { - return nil, err - } - - // TODO(stevvooe): Handle windows alternate data streams. - - if fi.Mode().IsRegular() { - dgst, err := c.digest(p) - if err != nil { - return nil, err - } - - return newRegularFile(*base, base.paths, fi.Size(), dgst) - } - - if fi.Mode().IsDir() { - return newDirectory(*base) - } - - if fi.Mode()&os.ModeSymlink != 0 { - // We handle relative links vs absolute links by including a - // beginning slash for absolute links. Effectively, the bundle's - // root is treated as the absolute link anchor. - target, err := c.driver.Readlink(fp) - if err != nil { - return nil, err - } - - return newSymLink(*base, target) - } - - if fi.Mode()&os.ModeNamedPipe != 0 { - return newNamedPipe(*base, base.paths) - } - - if fi.Mode()&os.ModeDevice != 0 { - deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver) - if !ok { - log.Printf("device extraction not supported %s", fp) - return nil, ErrNotSupported - } - - // character and block devices merely need to recover the - // major/minor device number. 
- major, minor, err := deviceDriver.DeviceInfo(fi) - if err != nil { - return nil, err - } - - return newDevice(*base, base.paths, major, minor) - } - - log.Printf("%q (%v) is not supported", fp, fi.Mode()) - return nil, ErrNotFound -} - -func (c *context) verifyMetadata(resource, target Resource) error { - if target.Mode() != resource.Mode() { - return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode()) - } - - if target.UID() != resource.UID() { - return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.GID()) - } - - if target.GID() != resource.GID() { - return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), target.GID()) - } - - if xattrer, ok := resource.(XAttrer); ok { - txattrer, tok := target.(XAttrer) - if !tok { - return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path()) - } - - // For xattrs, only ensure that we have those defined in the resource - // and their values match. We can ignore other xattrs. In other words, - // we only verify that target has the subset defined by resource. - txattrs := txattrer.XAttrs() - for attr, value := range xattrer.XAttrs() { - tvalue, ok := txattrs[attr] - if !ok { - return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr) - } - - if !bytes.Equal(value, tvalue) { - return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path()) - } - } - } - - switch r := resource.(type) { - case RegularFile: - // TODO(stevvooe): Another reason to use a record-based approach. We - // have to do another type switch to get this to work. This could be - // fixed with an Equal function, but let's study this a little more to - // be sure. - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - if t.Size() != r.Size() { - return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size()) - } - case Directory: - t, ok := target.(Directory) - if !ok { - return fmt.Errorf("resource %q target not a directory", t.Path()) - } - case SymLink: - t, ok := target.(SymLink) - if !ok { - return fmt.Errorf("resource %q target not a symlink", t.Path()) - } - - if t.Target() != r.Target() { - return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target()) - } - case Device: - t, ok := target.(Device) - if !ok { - return fmt.Errorf("resource %q is not a device", t.Path()) - } - - if t.Major() != r.Major() || t.Minor() != r.Minor() { - return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor()) - } - case NamedPipe: - t, ok := target.(NamedPipe) - if !ok { - return fmt.Errorf("resource %q is not a named pipe", t.Path()) - } - default: - return fmt.Errorf("cannot verify resource: %v", resource) - } - - return nil -} - -// Verify the resource in the context. An error will be returned a discrepancy -// is found. 
-func (c *context) Verify(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - fi, err := c.driver.Lstat(fp) - if err != nil { - return err - } - - target, err := c.Resource(resource.Path(), fi) - if err != nil { - return err - } - - if target.Path() != resource.Path() { - return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path()) - } - - if err := c.verifyMetadata(resource, target); err != nil { - return err - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - hardlinkKey, err := newHardlinkKey(fi) - if err == errNotAHardLink { - if len(h.Paths()) > 1 { - return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path()) - } - } else if err != nil { - return err - } - - for _, path := range h.Paths()[1:] { - fpLink, err := c.fullpath(path) - if err != nil { - return err - } - - fiLink, err := c.driver.Lstat(fpLink) - if err != nil { - return err - } - - targetLink, err := c.Resource(path, fiLink) - if err != nil { - return err - } - - hardlinkKeyLink, err := newHardlinkKey(fiLink) - if err != nil { - return err - } - - if hardlinkKeyLink != hardlinkKey { - return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path()) - } - - if err := c.verifyMetadata(resource, targetLink); err != nil { - return err - } - } - } - - switch r := resource.(type) { - case RegularFile: - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - // TODO(stevvooe): This may need to get a little more sophisticated - // for digest comparison. We may want to actually calculate the - // provided digests, rather than the implementations having an - // overlap. - if !digestsMatch(t.Digests(), r.Digests()) { - return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests()) - } - } - - return nil -} - -func (c *context) checkoutFile(fp string, rf RegularFile) error { - if c.provider == nil { - return fmt.Errorf("no file provider") - } - var ( - r io.ReadCloser - err error - ) - for _, dgst := range rf.Digests() { - r, err = c.provider.Reader(dgst) - if err == nil { - break - } - } - if err != nil { - return fmt.Errorf("file content could not be provided: %v", err) - } - defer r.Close() - - return atomicWriteFile(fp, r, rf.Size(), rf.Mode()) -} - -// Apply the resource to the contexts. An error will be returned if the -// operation fails. Depending on the resource type, the resource may be -// created. For resource that cannot be resolved, an error will be returned. 
-func (c *context) Apply(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - if !strings.HasPrefix(fp, c.root) { - return fmt.Errorf("resource %v escapes root", resource) - } - - var chmod = true - fi, err := c.driver.Lstat(fp) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - switch r := resource.(type) { - case RegularFile: - if fi == nil { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - chmod = false - } else { - if !fi.Mode().IsRegular() { - return fmt.Errorf("file %q should be a regular file, but is not", resource.Path()) - } - if fi.Size() != r.Size() { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - } else { - for _, dgst := range r.Digests() { - f, err := os.Open(fp) - if err != nil { - return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err) - } - compared, err := dgst.Algorithm().FromReader(f) - if err == nil && dgst != compared { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - break - } - if err1 := f.Close(); err == nil { - err = err1 - } - if err != nil { - return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err) - } - } - } - } - case Directory: - if fi == nil { - if err := c.driver.Mkdir(fp, resource.Mode()); err != nil { - return err - } - } else if !fi.Mode().IsDir() { - return fmt.Errorf("%q should be a directory, but is not", resource.Path()) - } - - case SymLink: - var target string // only possibly set if target resource is a symlink - - if fi != nil { - if fi.Mode()&os.ModeSymlink != 0 { - target, err = c.driver.Readlink(fp) - if err != nil { - return err - } - } - } - - if target != r.Target() { - if fi != nil { - if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory? 
- return err - } - } - - if err := c.driver.Symlink(r.Target(), fp); err != nil { - return err - } - } - - case Device: - if fi == nil { - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } else if (fi.Mode() & os.ModeDevice) == 0 { - return fmt.Errorf("%q should be a device, but is not", resource.Path()) - } else { - major, minor, err := devices.DeviceInfo(fi) - if err != nil { - return err - } - if major != r.Major() || minor != r.Minor() { - if err := c.driver.Remove(fp); err != nil { - return err - } - - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } - } - - case NamedPipe: - if fi == nil { - if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil { - return err - } - } else if (fi.Mode() & os.ModeNamedPipe) == 0 { - return fmt.Errorf("%q should be a named pipe, but is not", resource.Path()) - } - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - for _, path := range h.Paths() { - if path == resource.Path() { - continue - } - - lp, err := c.fullpath(path) - if err != nil { - return err - } - - if _, fi := c.driver.Lstat(lp); fi == nil { - c.driver.Remove(lp) - } - if err := c.driver.Link(fp, lp); err != nil { - return err - } - } - } - - // Update filemode if file was not created - if chmod { - if err := c.driver.Lchmod(fp, resource.Mode()); err != nil { - return err - } - } - - if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil { - return err - } - - if xattrer, ok := resource.(XAttrer); ok { - // For xattrs, only ensure that we have those defined in the resource - // and their values are set. We can ignore other xattrs. In other words, - // we only set xattres defined by resource but never remove. - - if _, ok := resource.(SymLink); ok { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path()) - } - if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } else { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - return fmt.Errorf("unsupported xattr for resource %q", resource.Path()) - } - if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } - } - - return nil -} - -// Walk provides a convenience function to call filepath.Walk correctly for -// the context. Otherwise identical to filepath.Walk, the path argument is -// corrected to be contained within the context. -func (c *context) Walk(fn filepath.WalkFunc) error { - root := c.root - fi, err := c.driver.Lstat(c.root) - if err == nil && fi.Mode()&os.ModeSymlink != 0 { - root, err = c.driver.Readlink(c.root) - if err != nil { - return err - } - } - return c.pathDriver.Walk(root, func(p string, fi os.FileInfo, err error) error { - contained, err := c.containWithRoot(p, root) - return fn(contained, fi, err) - }) -} - -// fullpath returns the system path for the resource, joined with the context -// root. The path p must be a part of the context. -func (c *context) fullpath(p string) (string, error) { - p = c.pathDriver.Join(c.root, p) - if !strings.HasPrefix(p, c.root) { - return "", fmt.Errorf("invalid context path") - } - - return p, nil -} - -// contain cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the context root. 
-func (c *context) contain(p string) (string, error) { - return c.containWithRoot(p, c.root) -} - -// containWithRoot cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the passed root. Extra care should be used when calling this -// instead of contain. This is needed for Walk, as if context root is a symlink, -// it must be evaluated prior to the Walk -func (c *context) containWithRoot(p string, root string) (string, error) { - sanitized, err := c.pathDriver.Rel(root, p) - if err != nil { - return "", err - } - - // ZOMBIES(stevvooe): In certain cases, we may want to remap these to a - // "containment error", so the caller can decide what to do. - return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil -} - -// digest returns the digest of the file at path p, relative to the root. -func (c *context) digest(p string) (digest.Digest, error) { - f, err := c.driver.Open(c.pathDriver.Join(c.root, p)) - if err != nil { - return "", err - } - defer f.Close() - - return c.digester.Digest(f) -} - -// resolveXAttrs attempts to resolve the extended attributes for the resource -// at the path fp, which is the full path to the resource. If the resource -// cannot have xattrs, nil will be returned. -func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) { - if fi.Mode().IsRegular() || fi.Mode().IsDir() { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - log.Println("xattr extraction not supported") - return nil, ErrNotSupported - } - - return xattrDriver.Getxattr(fp) - } - - if fi.Mode()&os.ModeSymlink != 0 { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - log.Println("xattr extraction for symlinks not supported") - return nil, ErrNotSupported - } - - return lxattrDriver.LGetxattr(fp) - } - - return nil, nil -} diff --git a/vendor/github.com/containerd/continuity/digests.go b/vendor/github.com/containerd/continuity/digests.go deleted file mode 100644 index bf92275dbd366..0000000000000 --- a/vendor/github.com/containerd/continuity/digests.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package continuity - -import ( - "fmt" - "io" - "sort" - - "github.com/opencontainers/go-digest" -) - -// Digester produces a digest for a given read stream -type Digester interface { - Digest(io.Reader) (digest.Digest, error) -} - -// ContentProvider produces a read stream for a given digest -type ContentProvider interface { - Reader(digest.Digest) (io.ReadCloser, error) -} - -type simpleDigester struct { - algorithm digest.Algorithm -} - -func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) { - digester := sd.algorithm.Digester() - - if _, err := io.Copy(digester.Hash(), r); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// uniqifyDigests sorts and uniqifies the provided digest, ensuring that the -// digests are not repeated and no two digests with the same algorithm have -// different values. Because a stable sort is used, this has the effect of -// "zipping" digest collections from multiple resources. -func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) { - sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here. - seen := map[digest.Digest]struct{}{} - algs := map[digest.Algorithm][]digest.Digest{} // detect different digests. - - var out []digest.Digest - // uniqify the digests - for _, d := range digests { - if _, ok := seen[d]; ok { - continue - } - - seen[d] = struct{}{} - algs[d.Algorithm()] = append(algs[d.Algorithm()], d) - - if len(algs[d.Algorithm()]) > 1 { - return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm()) - } - - out = append(out, d) - } - - return out, nil -} - -// digestsMatch compares the two sets of digests to see if they match. -func digestsMatch(as, bs []digest.Digest) bool { - all := append(as, bs...) - - uniqified, err := uniqifyDigests(all...) - if err != nil { - // the only error uniqifyDigests returns is when the digests disagree. - return false - } - - disjoint := len(as) + len(bs) - if len(uniqified) == disjoint { - // if these two sets have the same cardinality, we know both sides - // didn't share any digests. - return false - } - - return true -} - -type digestSlice []digest.Digest - -func (p digestSlice) Len() int { return len(p) } -func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p digestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/containerd/continuity/groups_unix.go b/vendor/github.com/containerd/continuity/groups_unix.go deleted file mode 100644 index 022d8ab783911..0000000000000 --- a/vendor/github.com/containerd/continuity/groups_unix.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// TODO(stevvooe): This needs a lot of work before we can call it useful. 
- -type groupIndex struct { - byName map[string]*group - byGID map[int]*group -} - -func getGroupIndex() (*groupIndex, error) { - f, err := os.Open("/etc/group") - if err != nil { - return nil, err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return nil, err - } - - return newGroupIndex(groups), nil -} - -func newGroupIndex(groups []group) *groupIndex { - gi := &groupIndex{ - byName: make(map[string]*group), - byGID: make(map[int]*group), - } - - for i, group := range groups { - gi.byGID[group.gid] = &groups[i] - gi.byName[group.name] = &groups[i] - } - - return gi -} - -type group struct { - name string - gid int - members []string -} - -func getGroupName(gid int) (string, error) { - f, err := os.Open("/etc/group") - if err != nil { - return "", err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return "", err - } - - for _, group := range groups { - if group.gid == gid { - return group.name, nil - } - } - - return "", fmt.Errorf("no group for gid") -} - -// parseGroups parses an /etc/group file for group names, ids and membership. -// This is unix specific. -func parseGroups(rd io.Reader) ([]group, error) { - var groups []group - scanner := bufio.NewScanner(rd) - - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "#") { - continue // skip comment - } - - parts := strings.SplitN(scanner.Text(), ":", 4) - - if len(parts) != 4 { - return nil, fmt.Errorf("bad entry: %q", scanner.Text()) - } - - name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3] - - gid, err := strconv.Atoi(sgid) - if err != nil { - return nil, fmt.Errorf("bad gid: %q", gid) - } - - members := strings.Split(smembers, ",") - - groups = append(groups, group{ - name: name, - gid: gid, - members: members, - }) - } - - if scanner.Err() != nil { - return nil, scanner.Err() - } - - return groups, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks.go b/vendor/github.com/containerd/continuity/hardlinks.go deleted file mode 100644 index d493dd7776bc7..0000000000000 --- a/vendor/github.com/containerd/continuity/hardlinks.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" -) - -var ( - errNotAHardLink = fmt.Errorf("invalid hardlink") -) - -type hardlinkManager struct { - hardlinks map[hardlinkKey][]Resource -} - -func newHardlinkManager() *hardlinkManager { - return &hardlinkManager{ - hardlinks: map[hardlinkKey][]Resource{}, - } -} - -// Add attempts to add the resource to the hardlink manager. If the resource -// cannot be considered as a hardlink candidate, errNotAHardLink is returned. 
-func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error { - if _, ok := resource.(Hardlinkable); !ok { - return errNotAHardLink - } - - key, err := newHardlinkKey(fi) - if err != nil { - return err - } - - hlm.hardlinks[key] = append(hlm.hardlinks[key], resource) - - return nil -} - -// Merge processes the current state of the hardlink manager and merges any -// shared nodes into hardlinked resources. -func (hlm *hardlinkManager) Merge() ([]Resource, error) { - var resources []Resource - for key, linked := range hlm.hardlinks { - if len(linked) < 1 { - return nil, fmt.Errorf("no hardlink entrys for dev, inode pair: %#v", key) - } - - merged, err := Merge(linked...) - if err != nil { - return nil, fmt.Errorf("error merging hardlink: %v", err) - } - - resources = append(resources, merged) - } - - return resources, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_unix.go b/vendor/github.com/containerd/continuity/hardlinks_unix.go deleted file mode 100644 index a15d1759ee6ce..0000000000000 --- a/vendor/github.com/containerd/continuity/hardlinks_unix.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// hardlinkKey provides a tuple-key for managing hardlinks. This is system- -// specific. -type hardlinkKey struct { - dev uint64 - inode uint64 -} - -// newHardlinkKey returns a hardlink key for the provided file info. If the -// resource does not represent a possible hardlink, errNotAHardLink will be -// returned. -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo") - } - - if sys.Nlink < 2 { - // NOTE(stevvooe): This is not always true for all filesystems. We - // should somehow detect this and provided a slow "polyfill" that - // leverages os.SameFile if we detect a filesystem where link counts - // is not really supported. - return hardlinkKey{}, errNotAHardLink - } - - return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil -} diff --git a/vendor/github.com/containerd/continuity/ioutils.go b/vendor/github.com/containerd/continuity/ioutils.go deleted file mode 100644 index 503640ebfc86b..0000000000000 --- a/vendor/github.com/containerd/continuity/ioutils.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// AtomicWriteFile atomically writes data to a file by first writing to a -// temp file and calling rename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - buf := bytes.NewBuffer(data) - return atomicWriteFile(filename, buf, int64(len(data)), perm) -} - -// atomicWriteFile writes data to a file by first writing to a temp -// file and calling rename. -func atomicWriteFile(filename string, r io.Reader, dataSize int64, perm os.FileMode) error { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return err - } - err = os.Chmod(f.Name(), perm) - if err != nil { - f.Close() - return err - } - n, err := io.Copy(f, r) - if err == nil && n < dataSize { - f.Close() - return io.ErrShortWrite - } - if err != nil { - f.Close() - return err - } - if err := f.Sync(); err != nil { - f.Close() - return err - } - if err := f.Close(); err != nil { - return err - } - return os.Rename(f.Name(), filename) -} diff --git a/vendor/github.com/containerd/continuity/manifest.go b/vendor/github.com/containerd/continuity/manifest.go deleted file mode 100644 index 8074bbfbb1e22..0000000000000 --- a/vendor/github.com/containerd/continuity/manifest.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "io" - "log" - "os" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/golang/protobuf/proto" -) - -// Manifest provides the contents of a manifest. Users of this struct should -// not typically modify any fields directly. -type Manifest struct { - // Resources specifies all the resources for a manifest in order by path. 
- Resources []Resource -} - -func Unmarshal(p []byte) (*Manifest, error) { - var bm pb.Manifest - - if err := proto.Unmarshal(p, &bm); err != nil { - return nil, err - } - - var m Manifest - for _, b := range bm.Resource { - r, err := fromProto(b) - if err != nil { - return nil, err - } - - m.Resources = append(m.Resources, r) - } - - return &m, nil -} - -func Marshal(m *Manifest) ([]byte, error) { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.Marshal(&bm) -} - -func MarshalText(w io.Writer, m *Manifest) error { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.MarshalText(w, &bm) -} - -// BuildManifest creates the manifest for the given context -func BuildManifest(ctx Context) (*Manifest, error) { - resourcesByPath := map[string]Resource{} - hardlinks := newHardlinkManager() - - if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("error walking %s: %v", p, err) - } - - if p == string(os.PathSeparator) { - // skip root - return nil - } - - resource, err := ctx.Resource(p, fi) - if err != nil { - if err == ErrNotFound { - return nil - } - log.Printf("error getting resource %q: %v", p, err) - return err - } - - // add to the hardlink manager - if err := hardlinks.Add(fi, resource); err == nil { - // Resource has been accepted by hardlink manager so we don't add - // it to the resourcesByPath until we merge at the end. - return nil - } else if err != errNotAHardLink { - // handle any other case where we have a proper error. - return fmt.Errorf("adding hardlink %s: %v", p, err) - } - - resourcesByPath[p] = resource - - return nil - }); err != nil { - return nil, err - } - - // merge and post-process the hardlinks. - hardlinked, err := hardlinks.Merge() - if err != nil { - return nil, err - } - - for _, resource := range hardlinked { - resourcesByPath[resource.Path()] = resource - } - - var resources []Resource - for _, resource := range resourcesByPath { - resources = append(resources, resource) - } - - sort.Stable(ByPath(resources)) - - return &Manifest{ - Resources: resources, - }, nil -} - -// VerifyManifest verifies all the resources in a manifest -// against files from the given context. -func VerifyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Verify(resource); err != nil { - return err - } - } - - return nil -} - -// ApplyManifest applies on the resources in a manifest to -// the given context. -func ApplyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Apply(resource); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containerd/continuity/proto/manifest.pb.go b/vendor/github.com/containerd/continuity/proto/manifest.pb.go deleted file mode 100644 index 24317766257ad..0000000000000 --- a/vendor/github.com/containerd/continuity/proto/manifest.pb.go +++ /dev/null @@ -1,181 +0,0 @@ -// Code generated by protoc-gen-go. -// source: manifest.proto -// DO NOT EDIT! - -/* -Package proto is a generated protocol buffer package. 
- -It is generated from these files: - manifest.proto - -It has these top-level messages: - Manifest - Resource - XAttr - ADSEntry -*/ -package proto - -import proto1 "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto1.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package - -// Manifest specifies the entries in a container bundle, keyed and sorted by -// path. -type Manifest struct { - Resource []*Resource `protobuf:"bytes,1,rep,name=resource" json:"resource,omitempty"` -} - -func (m *Manifest) Reset() { *m = Manifest{} } -func (m *Manifest) String() string { return proto1.CompactTextString(m) } -func (*Manifest) ProtoMessage() {} -func (*Manifest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Manifest) GetResource() []*Resource { - if m != nil { - return m.Resource - } - return nil -} - -type Resource struct { - // Path specifies the path from the bundle root. If more than one - // path is present, the entry may represent a hardlink, rather than using - // a link target. The path format is operating system specific. - Path []string `protobuf:"bytes,1,rep,name=path" json:"path,omitempty"` - // Uid specifies the user id for the resource. - Uid int64 `protobuf:"varint,2,opt,name=uid" json:"uid,omitempty"` - // Gid specifies the group id for the resource. - Gid int64 `protobuf:"varint,3,opt,name=gid" json:"gid,omitempty"` - // user and group are not currently used but their field numbers have been - // reserved for future use. As such, they are marked as deprecated. - User string `protobuf:"bytes,4,opt,name=user" json:"user,omitempty"` - Group string `protobuf:"bytes,5,opt,name=group" json:"group,omitempty"` - // Mode defines the file mode and permissions. We've used the same - // bit-packing from Go's os package, - // http://golang.org/pkg/os/#FileMode, since they've done the work of - // creating a cross-platform layout. - Mode uint32 `protobuf:"varint,6,opt,name=mode" json:"mode,omitempty"` - // Size specifies the size in bytes of the resource. This is only valid - // for regular files. - Size uint64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"` - // Digest specifies the content digest of the target file. Only valid for - // regular files. The strings are formatted in OCI style, i.e. :. - // For detailed information about the format, please refer to OCI Image Spec: - // https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification - // The digests are sorted in lexical order and implementations may choose - // which algorithms they prefer. - Digest []string `protobuf:"bytes,8,rep,name=digest" json:"digest,omitempty"` - // Target defines the target of a hard or soft link. Absolute links start - // with a slash and specify the resource relative to the bundle root. - // Relative links do not start with a slash and are relative to the - // resource path. - Target string `protobuf:"bytes,9,opt,name=target" json:"target,omitempty"` - // Major specifies the major device number for character and block devices. 
- Major uint64 `protobuf:"varint,10,opt,name=major" json:"major,omitempty"` - // Minor specifies the minor device number for character and block devices. - Minor uint64 `protobuf:"varint,11,opt,name=minor" json:"minor,omitempty"` - // Xattr provides storage for extended attributes for the target resource. - Xattr []*XAttr `protobuf:"bytes,12,rep,name=xattr" json:"xattr,omitempty"` - // Ads stores one or more alternate data streams for the target resource. - Ads []*ADSEntry `protobuf:"bytes,13,rep,name=ads" json:"ads,omitempty"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto1.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Resource) GetXattr() []*XAttr { - if m != nil { - return m.Xattr - } - return nil -} - -func (m *Resource) GetAds() []*ADSEntry { - if m != nil { - return m.Ads - } - return nil -} - -// XAttr encodes extended attributes for a resource. -type XAttr struct { - // Name specifies the attribute name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Data specifies the associated data for the attribute. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *XAttr) Reset() { *m = XAttr{} } -func (m *XAttr) String() string { return proto1.CompactTextString(m) } -func (*XAttr) ProtoMessage() {} -func (*XAttr) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -// ADSEntry encodes information for a Windows Alternate Data Stream. -type ADSEntry struct { - // Name specifices the stream name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Data specifies the stream data. - // See also the description about the digest below. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - // Digest is a CAS representation of the stream data. - // - // At least one of data or digest MUST be specified, and either one of them - // SHOULD be specified. - // - // How to access the actual data using the digest is implementation-specific, - // and implementations can choose not to implement digest. - // So, digest SHOULD be used only when the stream data is large. 
- Digest string `protobuf:"bytes,3,opt,name=digest" json:"digest,omitempty"` -} - -func (m *ADSEntry) Reset() { *m = ADSEntry{} } -func (m *ADSEntry) String() string { return proto1.CompactTextString(m) } -func (*ADSEntry) ProtoMessage() {} -func (*ADSEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func init() { - proto1.RegisterType((*Manifest)(nil), "proto.Manifest") - proto1.RegisterType((*Resource)(nil), "proto.Resource") - proto1.RegisterType((*XAttr)(nil), "proto.XAttr") - proto1.RegisterType((*ADSEntry)(nil), "proto.ADSEntry") -} - -func init() { proto1.RegisterFile("manifest.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, - 0x10, 0xc6, 0x49, 0x93, 0xf4, 0x4d, 0xa7, 0xed, 0xab, 0x2c, 0x52, 0xe6, 0x18, 0x73, 0x0a, 0x08, - 0x15, 0xf4, 0xe0, 0xb9, 0xa2, 0x17, 0xc1, 0xcb, 0x7a, 0xf1, 0xba, 0xba, 0x6b, 0x5c, 0x21, 0xd9, - 0xb0, 0xd9, 0x80, 0xfa, 0xe5, 0xfc, 0x6a, 0x32, 0xb3, 0x69, 0xd1, 0x9b, 0xa7, 0x3c, 0xcf, 0x6f, - 0xfe, 0x64, 0xf6, 0x81, 0xff, 0xad, 0xea, 0xec, 0x8b, 0x19, 0xc2, 0xb6, 0xf7, 0x2e, 0x38, 0x91, - 0xf3, 0xa7, 0xba, 0x82, 0xe2, 0x7e, 0x2a, 0x88, 0x33, 0x28, 0xbc, 0x19, 0xdc, 0xe8, 0x9f, 0x0d, - 0x26, 0x65, 0x5a, 0x2f, 0x2f, 0x8e, 0x62, 0xf3, 0x56, 0x4e, 0x58, 0x1e, 0x1a, 0xaa, 0xaf, 0x19, - 0x14, 0x7b, 0x2c, 0x04, 0x64, 0xbd, 0x0a, 0xaf, 0x3c, 0xb5, 0x90, 0xac, 0xc5, 0x31, 0xa4, 0xa3, - 0xd5, 0x38, 0x2b, 0x93, 0x3a, 0x95, 0x24, 0x89, 0x34, 0x56, 0x63, 0x1a, 0x49, 0x63, 0xb5, 0xd8, - 0x40, 0x36, 0x0e, 0xc6, 0x63, 0x56, 0x26, 0xf5, 0xe2, 0x7a, 0x86, 0x89, 0x64, 0x2f, 0x10, 0xf2, - 0xc6, 0xbb, 0xb1, 0xc7, 0xfc, 0x50, 0x88, 0x80, 0xfe, 0xd4, 0x3a, 0x6d, 0x70, 0x5e, 0x26, 0xf5, - 0x5a, 0xb2, 0x26, 0x36, 0xd8, 0x4f, 0x83, 0xff, 0xca, 0xa4, 0xce, 0x24, 0x6b, 0xb1, 0x81, 0xb9, - 0xb6, 0x8d, 0x19, 0x02, 0x16, 0x7c, 0xd3, 0xe4, 0x88, 0x07, 0xe5, 0x1b, 0x13, 0x70, 0x41, 0xab, - 0xe5, 0xe4, 0xc4, 0x09, 0xe4, 0xad, 0x7a, 0x73, 0x1e, 0x81, 0x97, 0x44, 0xc3, 0xd4, 0x76, 0xce, - 0xe3, 0x72, 0xa2, 0x64, 0x44, 0x05, 0xf9, 0xbb, 0x0a, 0xc1, 0xe3, 0x8a, 0x43, 0x5a, 0x4d, 0x21, - 0x3d, 0xee, 0x42, 0xf0, 0x32, 0x96, 0xc4, 0x29, 0xa4, 0x4a, 0x0f, 0xb8, 0xfe, 0x15, 0xe3, 0xee, - 0xe6, 0xe1, 0xb6, 0x0b, 0xfe, 0x43, 0x52, 0xad, 0x3a, 0x87, 0x9c, 0x47, 0xe8, 0xfe, 0x4e, 0xb5, - 0x94, 0x39, 0x5d, 0xc4, 0x9a, 0x98, 0x56, 0x41, 0x71, 0x7c, 0x2b, 0xc9, 0xba, 0xba, 0x83, 0x62, - 0xbf, 0xe1, 0xaf, 0x33, 0x3f, 0x72, 0x48, 0xe3, 0x7b, 0xa3, 0x7b, 0x9a, 0xf3, 0x45, 0x97, 0xdf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xef, 0x27, 0x99, 0xf7, 0x17, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/continuity/proto/manifest.proto b/vendor/github.com/containerd/continuity/proto/manifest.proto deleted file mode 100644 index 66ef80f054ed7..0000000000000 --- a/vendor/github.com/containerd/continuity/proto/manifest.proto +++ /dev/null @@ -1,97 +0,0 @@ -syntax = "proto3"; - -package proto; - -// Manifest specifies the entries in a container bundle, keyed and sorted by -// path. -message Manifest { - repeated Resource resource = 1; -} - -message Resource { - // Path specifies the path from the bundle root. If more than one - // path is present, the entry may represent a hardlink, rather than using - // a link target. The path format is operating system specific. - repeated string path = 1; - - // NOTE(stevvooe): Need to define clear precedence for user/group/uid/gid precedence. 
- - // Uid specifies the user id for the resource. - int64 uid = 2; - - // Gid specifies the group id for the resource. - int64 gid = 3; - - // user and group are not currently used but their field numbers have been - // reserved for future use. As such, they are marked as deprecated. - string user = 4 [deprecated=true]; // "deprecated" stands for "reserved" here - string group = 5 [deprecated=true]; // "deprecated" stands for "reserved" here - - // Mode defines the file mode and permissions. We've used the same - // bit-packing from Go's os package, - // http://golang.org/pkg/os/#FileMode, since they've done the work of - // creating a cross-platform layout. - uint32 mode = 6; - - // NOTE(stevvooe): Beyond here, we start defining type specific fields. - - // Size specifies the size in bytes of the resource. This is only valid - // for regular files. - uint64 size = 7; - - // Digest specifies the content digest of the target file. Only valid for - // regular files. The strings are formatted in OCI style, i.e. :. - // For detailed information about the format, please refer to OCI Image Spec: - // https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification - // The digests are sorted in lexical order and implementations may choose - // which algorithms they prefer. - repeated string digest = 8; - - // Target defines the target of a hard or soft link. Absolute links start - // with a slash and specify the resource relative to the bundle root. - // Relative links do not start with a slash and are relative to the - // resource path. - string target = 9; - - // Major specifies the major device number for character and block devices. - uint64 major = 10; - - // Minor specifies the minor device number for character and block devices. - uint64 minor = 11; - - // Xattr provides storage for extended attributes for the target resource. - repeated XAttr xattr = 12; - - // Ads stores one or more alternate data streams for the target resource. - repeated ADSEntry ads = 13; - -} - -// XAttr encodes extended attributes for a resource. -message XAttr { - // Name specifies the attribute name. - string name = 1; - - // Data specifies the associated data for the attribute. - bytes data = 2; -} - -// ADSEntry encodes information for a Windows Alternate Data Stream. -message ADSEntry { - // Name specifices the stream name. - string name = 1; - - // Data specifies the stream data. - // See also the description about the digest below. - bytes data = 2; - - // Digest is a CAS representation of the stream data. - // - // At least one of data or digest MUST be specified, and either one of them - // SHOULD be specified. - // - // How to access the actual data using the digest is implementation-specific, - // and implementations can choose not to implement digest. - // So, digest SHOULD be used only when the stream data is large. - string digest = 3; -} diff --git a/vendor/github.com/containerd/continuity/resource.go b/vendor/github.com/containerd/continuity/resource.go deleted file mode 100644 index d2f52bd31a6e5..0000000000000 --- a/vendor/github.com/containerd/continuity/resource.go +++ /dev/null @@ -1,590 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "errors" - "fmt" - "os" - "reflect" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/opencontainers/go-digest" -) - -// TODO(stevvooe): A record based model, somewhat sketched out at the bottom -// of this file, will be more flexible. Another possibly is to tie the package -// interface directly to the protobuf type. This will have efficiency -// advantages at the cost coupling the nasty codegen types to the exported -// interface. - -type Resource interface { - // Path provides the primary resource path relative to the bundle root. In - // cases where resources have more than one path, such as with hard links, - // this will return the primary path, which is often just the first entry. - Path() string - - // Mode returns the - Mode() os.FileMode - - UID() int64 - GID() int64 -} - -// ByPath provides the canonical sort order for a set of resources. Use with -// sort.Stable for deterministic sorting. -type ByPath []Resource - -func (bp ByPath) Len() int { return len(bp) } -func (bp ByPath) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] } -func (bp ByPath) Less(i, j int) bool { return bp[i].Path() < bp[j].Path() } - -type XAttrer interface { - XAttrs() map[string][]byte -} - -// Hardlinkable is an interface that a resource type satisfies if it can be a -// hardlink target. -type Hardlinkable interface { - // Paths returns all paths of the resource, including the primary path - // returned by Resource.Path. If len(Paths()) > 1, the resource is a hard - // link. - Paths() []string -} - -type RegularFile interface { - Resource - XAttrer - Hardlinkable - - Size() int64 - Digests() []digest.Digest -} - -// Merge two or more Resources into new file. Typically, this should be -// used to merge regular files as hardlinks. If the files are not identical, -// other than Paths and Digests, the merge will fail and an error will be -// returned. -func Merge(fs ...Resource) (Resource, error) { - if len(fs) < 1 { - return nil, fmt.Errorf("please provide a resource to merge") - } - - if len(fs) == 1 { - return fs[0], nil - } - - var paths []string - var digests []digest.Digest - bypath := map[string][]Resource{} - - // The attributes are all compared against the first to make sure they - // agree before adding to the above collections. If any of these don't - // correctly validate, the merge fails. - prototype := fs[0] - xattrs := make(map[string][]byte) - - // initialize xattrs for use below. All files must have same xattrs. 
- if prototypeXAttrer, ok := prototype.(XAttrer); ok { - for attr, value := range prototypeXAttrer.XAttrs() { - xattrs[attr] = value - } - } - - for _, f := range fs { - h, isHardlinkable := f.(Hardlinkable) - if !isHardlinkable { - return nil, errNotAHardLink - } - - if f.Mode() != prototype.Mode() { - return nil, fmt.Errorf("modes do not match: %v != %v", f.Mode(), prototype.Mode()) - } - - if f.UID() != prototype.UID() { - return nil, fmt.Errorf("uid does not match: %v != %v", f.UID(), prototype.UID()) - } - - if f.GID() != prototype.GID() { - return nil, fmt.Errorf("gid does not match: %v != %v", f.GID(), prototype.GID()) - } - - if xattrer, ok := f.(XAttrer); ok { - fxattrs := xattrer.XAttrs() - if !reflect.DeepEqual(fxattrs, xattrs) { - return nil, fmt.Errorf("resource %q xattrs do not match: %v != %v", f, fxattrs, xattrs) - } - } - - for _, p := range h.Paths() { - pfs, ok := bypath[p] - if !ok { - // ensure paths are unique by only appending on a new path. - paths = append(paths, p) - } - - bypath[p] = append(pfs, f) - } - - if regFile, isRegFile := f.(RegularFile); isRegFile { - prototypeRegFile, prototypeIsRegFile := prototype.(RegularFile) - if !prototypeIsRegFile { - return nil, errors.New("prototype is not a regular file") - } - - if regFile.Size() != prototypeRegFile.Size() { - return nil, fmt.Errorf("size does not match: %v != %v", regFile.Size(), prototypeRegFile.Size()) - } - - digests = append(digests, regFile.Digests()...) - } else if device, isDevice := f.(Device); isDevice { - prototypeDevice, prototypeIsDevice := prototype.(Device) - if !prototypeIsDevice { - return nil, errors.New("prototype is not a device") - } - - if device.Major() != prototypeDevice.Major() { - return nil, fmt.Errorf("major number does not match: %v != %v", device.Major(), prototypeDevice.Major()) - } - if device.Minor() != prototypeDevice.Minor() { - return nil, fmt.Errorf("minor number does not match: %v != %v", device.Minor(), prototypeDevice.Minor()) - } - } else if _, isNamedPipe := f.(NamedPipe); isNamedPipe { - _, prototypeIsNamedPipe := prototype.(NamedPipe) - if !prototypeIsNamedPipe { - return nil, errors.New("prototype is not a named pipe") - } - } else { - return nil, errNotAHardLink - } - } - - sort.Stable(sort.StringSlice(paths)) - - // Choose a "canonical" file. Really, it is just the first file to sort - // against. We also effectively select the very first digest as the - // "canonical" one for this file. - first := bypath[paths[0]][0] - - resource := resource{ - paths: paths, - mode: first.Mode(), - uid: first.UID(), - gid: first.GID(), - xattrs: xattrs, - } - - switch typedF := first.(type) { - case RegularFile: - var err error - digests, err = uniqifyDigests(digests...) - if err != nil { - return nil, err - } - - return ®ularFile{ - resource: resource, - size: typedF.Size(), - digests: digests, - }, nil - case Device: - return &device{ - resource: resource, - major: typedF.Major(), - minor: typedF.Minor(), - }, nil - - case NamedPipe: - return &namedPipe{ - resource: resource, - }, nil - - default: - return nil, errNotAHardLink - } -} - -type Directory interface { - Resource - XAttrer - - // Directory is a no-op method to identify directory objects by interface. - Directory() -} - -type SymLink interface { - Resource - - // Target returns the target of the symlink contained in the . - Target() string -} - -type NamedPipe interface { - Resource - Hardlinkable - XAttrer - - // Pipe is a no-op method to allow consistent resolution of NamedPipe - // interface. 
- Pipe() -} - -type Device interface { - Resource - Hardlinkable - XAttrer - - Major() uint64 - Minor() uint64 -} - -type resource struct { - paths []string - mode os.FileMode - uid, gid int64 - xattrs map[string][]byte -} - -var _ Resource = &resource{} - -func (r *resource) Path() string { - if len(r.paths) < 1 { - return "" - } - - return r.paths[0] -} - -func (r *resource) Mode() os.FileMode { - return r.mode -} - -func (r *resource) UID() int64 { - return r.uid -} - -func (r *resource) GID() int64 { - return r.gid -} - -type regularFile struct { - resource - size int64 - digests []digest.Digest -} - -var _ RegularFile = ®ularFile{} - -// newRegularFile returns the RegularFile, using the populated base resource -// and one or more digests of the content. -func newRegularFile(base resource, paths []string, size int64, dgsts ...digest.Digest) (RegularFile, error) { - if !base.Mode().IsRegular() { - return nil, fmt.Errorf("not a regular file") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - // make our own copy of digests - ds := make([]digest.Digest, len(dgsts)) - copy(ds, dgsts) - - return ®ularFile{ - resource: base, - size: size, - digests: ds, - }, nil -} - -func (rf *regularFile) Paths() []string { - paths := make([]string, len(rf.paths)) - copy(paths, rf.paths) - return paths -} - -func (rf *regularFile) Size() int64 { - return rf.size -} - -func (rf *regularFile) Digests() []digest.Digest { - digests := make([]digest.Digest, len(rf.digests)) - copy(digests, rf.digests) - return digests -} - -func (rf *regularFile) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(rf.xattrs)) - - for attr, value := range rf.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type directory struct { - resource -} - -var _ Directory = &directory{} - -func newDirectory(base resource) (Directory, error) { - if !base.Mode().IsDir() { - return nil, fmt.Errorf("not a directory") - } - - return &directory{ - resource: base, - }, nil -} - -func (d *directory) Directory() {} - -func (d *directory) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type symLink struct { - resource - target string -} - -var _ SymLink = &symLink{} - -func newSymLink(base resource, target string) (SymLink, error) { - if base.Mode()&os.ModeSymlink == 0 { - return nil, fmt.Errorf("not a symlink") - } - - return &symLink{ - resource: base, - target: target, - }, nil -} - -func (l *symLink) Target() string { - return l.target -} - -type namedPipe struct { - resource -} - -var _ NamedPipe = &namedPipe{} - -func newNamedPipe(base resource, paths []string) (NamedPipe, error) { - if base.Mode()&os.ModeNamedPipe == 0 { - return nil, fmt.Errorf("not a namedpipe") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &namedPipe{ - resource: base, - }, nil -} - -func (np *namedPipe) Pipe() {} - -func (np *namedPipe) Paths() []string { - paths := make([]string, len(np.paths)) - copy(paths, np.paths) - return paths -} - -func (np *namedPipe) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(np.xattrs)) - - for attr, value := range np.xattrs { - xattrs[attr] = append(xattrs[attr], value...) 
- } - - return xattrs -} - -type device struct { - resource - major, minor uint64 -} - -var _ Device = &device{} - -func newDevice(base resource, paths []string, major, minor uint64) (Device, error) { - if base.Mode()&os.ModeDevice == 0 { - return nil, fmt.Errorf("not a device") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &device{ - resource: base, - major: major, - minor: minor, - }, nil -} - -func (d *device) Paths() []string { - paths := make([]string, len(d.paths)) - copy(paths, d.paths) - return paths -} - -func (d *device) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -func (d device) Major() uint64 { - return d.major -} - -func (d device) Minor() uint64 { - return d.minor -} - -// toProto converts a resource to a protobuf record. We'd like to push this -// the individual types but we want to keep this all together during -// prototyping. -func toProto(resource Resource) *pb.Resource { - b := &pb.Resource{ - Path: []string{resource.Path()}, - Mode: uint32(resource.Mode()), - Uid: resource.UID(), - Gid: resource.GID(), - } - - if xattrer, ok := resource.(XAttrer); ok { - // Sorts the XAttrs by name for consistent ordering. - keys := []string{} - xattrs := xattrer.XAttrs() - for k := range xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - b.Xattr = append(b.Xattr, &pb.XAttr{Name: k, Data: xattrs[k]}) - } - } - - switch r := resource.(type) { - case RegularFile: - b.Path = r.Paths() - b.Size = uint64(r.Size()) - - for _, dgst := range r.Digests() { - b.Digest = append(b.Digest, dgst.String()) - } - case SymLink: - b.Target = r.Target() - case Device: - b.Major, b.Minor = r.Major(), r.Minor() - b.Path = r.Paths() - case NamedPipe: - b.Path = r.Paths() - } - - // enforce a few stability guarantees that may not be provided by the - // resource implementation. - sort.Strings(b.Path) - - return b -} - -// fromProto converts from a protobuf Resource to a Resource interface. -func fromProto(b *pb.Resource) (Resource, error) { - base := &resource{ - paths: b.Path, - mode: os.FileMode(b.Mode), - uid: b.Uid, - gid: b.Gid, - } - - base.xattrs = make(map[string][]byte, len(b.Xattr)) - - for _, attr := range b.Xattr { - base.xattrs[attr.Name] = attr.Data - } - - switch { - case base.Mode().IsRegular(): - dgsts := make([]digest.Digest, len(b.Digest)) - for i, dgst := range b.Digest { - // TODO(stevvooe): Should we be validating at this point? - dgsts[i] = digest.Digest(dgst) - } - - return newRegularFile(*base, b.Path, int64(b.Size), dgsts...) - case base.Mode().IsDir(): - return newDirectory(*base) - case base.Mode()&os.ModeSymlink != 0: - return newSymLink(*base, b.Target) - case base.Mode()&os.ModeNamedPipe != 0: - return newNamedPipe(*base, b.Path) - case base.Mode()&os.ModeDevice != 0: - return newDevice(*base, b.Path, b.Major, b.Minor) - } - - return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode()) -} - -// NOTE(stevvooe): An alternative model that supports inline declaration. -// Convenient for unit testing where inline declarations may be desirable but -// creates an awkward API for the standard use case. 
- -// type ResourceKind int - -// const ( -// ResourceRegularFile = iota + 1 -// ResourceDirectory -// ResourceSymLink -// Resource -// ) - -// type Resource struct { -// Kind ResourceKind -// Paths []string -// Mode os.FileMode -// UID string -// GID string -// Size int64 -// Digests []digest.Digest -// Target string -// Major, Minor int -// XAttrs map[string][]byte -// } - -// type RegularFile struct { -// Paths []string -// Size int64 -// Digests []digest.Digest -// Perm os.FileMode // os.ModePerm + sticky, setuid, setgid -// } diff --git a/vendor/github.com/containerd/continuity/resource_unix.go b/vendor/github.com/containerd/continuity/resource_unix.go deleted file mode 100644 index 0e103ccc5c7a0..0000000000000 --- a/vendor/github.com/containerd/continuity/resource_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - // TODO(stevvooe): This need to be resolved for the container's root, - // where here we are really getting the host OS's value. We need to allow - // this be passed in and fixed up to make these uid/gid mappings portable. - // Either this can be part of the driver or we can achieve it through some - // other mechanism. - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - // TODO(stevvooe): This may not be a hard error for all platforms. We - // may want to move this to the driver. - return nil, fmt.Errorf("unable to resolve syscall.Stat_t from (os.FileInfo).Sys(): %#v", fi) - } - - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - - uid: int64(sys.Uid), - gid: int64(sys.Gid), - - // NOTE(stevvooe): Population of shared xattrs field is deferred to - // the resource types that populate it. Since they are a property of - // the context, they must set there. - }, nil -}